diff --git a/.gitignore b/.gitignore
index 2cec9aa..4201a47 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,4 +8,6 @@ data_volume/
tmpAudio/
.idea
-*.iml
\ No newline at end of file
+*.iml
+
+variables.json
\ No newline at end of file
diff --git a/cloudformation/jambonz-mini.yaml b/cloudformation/jambonz-mini.yaml
index 8008aed..92f12f2 100644
--- a/cloudformation/jambonz-mini.yaml
+++ b/cloudformation/jambonz-mini.yaml
@@ -95,7 +95,7 @@ Conditions:
Mappings:
AWSRegion2AMI:
us-east-1:
- Ami: ami-07ae487129ffcf70c
+ Ami: ami-00d771ac0ee2a4774
Resources:
IamCloudwatchRole:
@@ -181,7 +181,7 @@ Resources:
INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)"
AWS_REGION_NAME="$(curl -s http://169.254.169.254/latest/meta-data/placement/region)"
- # change the database password to the instance id
+ # change the database password to a random id
NEW_DB_PASSWD="$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)"
echo "alter user 'admin'@'%' identified by '$NEW_DB_PASSWD'" | mysql -h localhost -u admin -D jambones -pJambonzR0ck$
sudo sed -i -e "s/\(.*\)JAMBONES_MYSQL_PASSWORD.*/\1JAMBONES_MYSQL_PASSWORD: '$NEW_DB_PASSWD',/g" /home/admin/apps/ecosystem.config.js
diff --git a/packer/jambonz-mini/README.md b/packer/jambonz-mini/aws/README.md
similarity index 100%
rename from packer/jambonz-mini/README.md
rename to packer/jambonz-mini/aws/README.md
diff --git a/packer/jambonz-mini/files/20auto-upgrades b/packer/jambonz-mini/aws/files/20auto-upgrades
similarity index 100%
rename from packer/jambonz-mini/files/20auto-upgrades
rename to packer/jambonz-mini/aws/files/20auto-upgrades
diff --git a/packer/jambonz-mini/files/Makefile.am.extra b/packer/jambonz-mini/aws/files/Makefile.am.extra
similarity index 100%
rename from packer/jambonz-mini/files/Makefile.am.extra
rename to packer/jambonz-mini/aws/files/Makefile.am.extra
diff --git a/packer/jambonz-mini/files/Makefile.am.grpc.patch b/packer/jambonz-mini/aws/files/Makefile.am.grpc.patch
similarity index 100%
rename from packer/jambonz-mini/files/Makefile.am.grpc.patch
rename to packer/jambonz-mini/aws/files/Makefile.am.grpc.patch
diff --git a/packer/jambonz-mini/files/Makefile.am.patch b/packer/jambonz-mini/aws/files/Makefile.am.patch
similarity index 100%
rename from packer/jambonz-mini/files/Makefile.am.patch
rename to packer/jambonz-mini/aws/files/Makefile.am.patch
diff --git a/packer/jambonz-mini/files/Makefile.nuance b/packer/jambonz-mini/aws/files/Makefile.nuance
similarity index 100%
rename from packer/jambonz-mini/files/Makefile.nuance
rename to packer/jambonz-mini/aws/files/Makefile.nuance
diff --git a/packer/jambonz-mini/files/SpeechSDK-Linux-1.26.0.tar.gz b/packer/jambonz-mini/aws/files/SpeechSDK-Linux-1.26.0.tar.gz
similarity index 100%
rename from packer/jambonz-mini/files/SpeechSDK-Linux-1.26.0.tar.gz
rename to packer/jambonz-mini/aws/files/SpeechSDK-Linux-1.26.0.tar.gz
diff --git a/packer/jambonz-mini/files/acl.conf.xml b/packer/jambonz-mini/aws/files/acl.conf.xml
similarity index 100%
rename from packer/jambonz-mini/files/acl.conf.xml
rename to packer/jambonz-mini/aws/files/acl.conf.xml
diff --git a/packer/jambonz-mini/files/apiban.logrotate b/packer/jambonz-mini/aws/files/apiban.logrotate
similarity index 100%
rename from packer/jambonz-mini/files/apiban.logrotate
rename to packer/jambonz-mini/aws/files/apiban.logrotate
diff --git a/packer/jambonz-mini/files/avmd.conf.xml b/packer/jambonz-mini/aws/files/avmd.conf.xml
similarity index 100%
rename from packer/jambonz-mini/files/avmd.conf.xml
rename to packer/jambonz-mini/aws/files/avmd.conf.xml
diff --git a/packer/jambonz-mini/files/cloudwatch-config.json b/packer/jambonz-mini/aws/files/cloudwatch-config.json
similarity index 100%
rename from packer/jambonz-mini/files/cloudwatch-config.json
rename to packer/jambonz-mini/aws/files/cloudwatch-config.json
diff --git a/packer/jambonz-mini/files/conference.conf.xml b/packer/jambonz-mini/aws/files/conference.conf.xml
similarity index 100%
rename from packer/jambonz-mini/files/conference.conf.xml
rename to packer/jambonz-mini/aws/files/conference.conf.xml
diff --git a/packer/jambonz-mini/files/config.json b/packer/jambonz-mini/aws/files/config.json
similarity index 100%
rename from packer/jambonz-mini/files/config.json
rename to packer/jambonz-mini/aws/files/config.json
diff --git a/packer/jambonz-mini/files/configure.ac.extra b/packer/jambonz-mini/aws/files/configure.ac.extra
similarity index 100%
rename from packer/jambonz-mini/files/configure.ac.extra
rename to packer/jambonz-mini/aws/files/configure.ac.extra
diff --git a/packer/jambonz-mini/files/configure.ac.grpc.patch b/packer/jambonz-mini/aws/files/configure.ac.grpc.patch
similarity index 100%
rename from packer/jambonz-mini/files/configure.ac.grpc.patch
rename to packer/jambonz-mini/aws/files/configure.ac.grpc.patch
diff --git a/packer/jambonz-mini/files/configure.ac.patch b/packer/jambonz-mini/aws/files/configure.ac.patch
similarity index 100%
rename from packer/jambonz-mini/files/configure.ac.patch
rename to packer/jambonz-mini/aws/files/configure.ac.patch
diff --git a/packer/jambonz-mini/files/drachtio-5070.conf.xml b/packer/jambonz-mini/aws/files/drachtio-5070.conf.xml
similarity index 100%
rename from packer/jambonz-mini/files/drachtio-5070.conf.xml
rename to packer/jambonz-mini/aws/files/drachtio-5070.conf.xml
diff --git a/packer/jambonz-mini/files/drachtio-5070.gcp.service b/packer/jambonz-mini/aws/files/drachtio-5070.gcp.service
similarity index 100%
rename from packer/jambonz-mini/files/drachtio-5070.gcp.service
rename to packer/jambonz-mini/aws/files/drachtio-5070.gcp.service
diff --git a/packer/jambonz-mini/files/drachtio-5070.service b/packer/jambonz-mini/aws/files/drachtio-5070.service
similarity index 100%
rename from packer/jambonz-mini/files/drachtio-5070.service
rename to packer/jambonz-mini/aws/files/drachtio-5070.service
diff --git a/packer/jambonz-mini/files/drachtio-fail2ban.conf b/packer/jambonz-mini/aws/files/drachtio-fail2ban.conf
similarity index 100%
rename from packer/jambonz-mini/files/drachtio-fail2ban.conf
rename to packer/jambonz-mini/aws/files/drachtio-fail2ban.conf
diff --git a/packer/jambonz-mini/files/drachtio.conf.xml b/packer/jambonz-mini/aws/files/drachtio.conf.xml
similarity index 100%
rename from packer/jambonz-mini/files/drachtio.conf.xml
rename to packer/jambonz-mini/aws/files/drachtio.conf.xml
diff --git a/packer/jambonz-mini/files/drachtio.gcp.service b/packer/jambonz-mini/aws/files/drachtio.gcp.service
similarity index 100%
rename from packer/jambonz-mini/files/drachtio.gcp.service
rename to packer/jambonz-mini/aws/files/drachtio.gcp.service
diff --git a/packer/jambonz-mini/files/drachtio.service b/packer/jambonz-mini/aws/files/drachtio.service
similarity index 100%
rename from packer/jambonz-mini/files/drachtio.service
rename to packer/jambonz-mini/aws/files/drachtio.service
diff --git a/packer/jambonz-mini/files/ecosystem.config.js b/packer/jambonz-mini/aws/files/ecosystem.config.js
similarity index 100%
rename from packer/jambonz-mini/files/ecosystem.config.js
rename to packer/jambonz-mini/aws/files/ecosystem.config.js
diff --git a/packer/jambonz-mini/files/event_socket.conf.xml b/packer/jambonz-mini/aws/files/event_socket.conf.xml
similarity index 100%
rename from packer/jambonz-mini/files/event_socket.conf.xml
rename to packer/jambonz-mini/aws/files/event_socket.conf.xml
diff --git a/packer/jambonz-mini/files/freeswitch.service b/packer/jambonz-mini/aws/files/freeswitch.service
similarity index 100%
rename from packer/jambonz-mini/files/freeswitch.service
rename to packer/jambonz-mini/aws/files/freeswitch.service
diff --git a/packer/jambonz-mini/files/freeswitch_log_rotation b/packer/jambonz-mini/aws/files/freeswitch_log_rotation
similarity index 100%
rename from packer/jambonz-mini/files/freeswitch_log_rotation
rename to packer/jambonz-mini/aws/files/freeswitch_log_rotation
diff --git a/packer/jambonz-mini/files/grafana-dashboard-default.yaml b/packer/jambonz-mini/aws/files/grafana-dashboard-default.yaml
similarity index 100%
rename from packer/jambonz-mini/files/grafana-dashboard-default.yaml
rename to packer/jambonz-mini/aws/files/grafana-dashboard-default.yaml
diff --git a/packer/jambonz-mini/files/grafana-dashboard-heplify.json b/packer/jambonz-mini/aws/files/grafana-dashboard-heplify.json
similarity index 100%
rename from packer/jambonz-mini/files/grafana-dashboard-heplify.json
rename to packer/jambonz-mini/aws/files/grafana-dashboard-heplify.json
diff --git a/packer/jambonz-mini/files/grafana-dashboard-jambonz.json b/packer/jambonz-mini/aws/files/grafana-dashboard-jambonz.json
similarity index 100%
rename from packer/jambonz-mini/files/grafana-dashboard-jambonz.json
rename to packer/jambonz-mini/aws/files/grafana-dashboard-jambonz.json
diff --git a/packer/jambonz-mini/files/grafana-dashboard-servers.json b/packer/jambonz-mini/aws/files/grafana-dashboard-servers.json
similarity index 100%
rename from packer/jambonz-mini/files/grafana-dashboard-servers.json
rename to packer/jambonz-mini/aws/files/grafana-dashboard-servers.json
diff --git a/packer/jambonz-mini/files/grafana-datasource.yml b/packer/jambonz-mini/aws/files/grafana-datasource.yml
similarity index 100%
rename from packer/jambonz-mini/files/grafana-datasource.yml
rename to packer/jambonz-mini/aws/files/grafana-datasource.yml
diff --git a/packer/jambonz-mini/files/initialize-webapp-userdata.sh b/packer/jambonz-mini/aws/files/initialize-webapp-userdata.sh
similarity index 100%
rename from packer/jambonz-mini/files/initialize-webapp-userdata.sh
rename to packer/jambonz-mini/aws/files/initialize-webapp-userdata.sh
diff --git a/packer/jambonz-mini/files/jaeger.service b/packer/jambonz-mini/aws/files/jaeger.service
similarity index 100%
rename from packer/jambonz-mini/files/jaeger.service
rename to packer/jambonz-mini/aws/files/jaeger.service
diff --git a/packer/jambonz-mini/files/jambones-sql.sql b/packer/jambonz-mini/aws/files/jambones-sql.sql
similarity index 100%
rename from packer/jambonz-mini/files/jambones-sql.sql
rename to packer/jambonz-mini/aws/files/jambones-sql.sql
diff --git a/packer/jambonz-mini/files/mod_avmd.c.patch b/packer/jambonz-mini/aws/files/mod_avmd.c.patch
similarity index 100%
rename from packer/jambonz-mini/files/mod_avmd.c.patch
rename to packer/jambonz-mini/aws/files/mod_avmd.c.patch
diff --git a/packer/jambonz-mini/files/mod_httapi.c.patch b/packer/jambonz-mini/aws/files/mod_httapi.c.patch
similarity index 100%
rename from packer/jambonz-mini/files/mod_httapi.c.patch
rename to packer/jambonz-mini/aws/files/mod_httapi.c.patch
diff --git a/packer/jambonz-mini/files/mod_opusfile.c.patch b/packer/jambonz-mini/aws/files/mod_opusfile.c.patch
similarity index 100%
rename from packer/jambonz-mini/files/mod_opusfile.c.patch
rename to packer/jambonz-mini/aws/files/mod_opusfile.c.patch
diff --git a/packer/jambonz-mini/files/modules.conf.in.extra b/packer/jambonz-mini/aws/files/modules.conf.in.extra
similarity index 100%
rename from packer/jambonz-mini/files/modules.conf.in.extra
rename to packer/jambonz-mini/aws/files/modules.conf.in.extra
diff --git a/packer/jambonz-mini/files/modules.conf.in.grpc.patch b/packer/jambonz-mini/aws/files/modules.conf.in.grpc.patch
similarity index 100%
rename from packer/jambonz-mini/files/modules.conf.in.grpc.patch
rename to packer/jambonz-mini/aws/files/modules.conf.in.grpc.patch
diff --git a/packer/jambonz-mini/files/modules.conf.in.patch b/packer/jambonz-mini/aws/files/modules.conf.in.patch
similarity index 100%
rename from packer/jambonz-mini/files/modules.conf.in.patch
rename to packer/jambonz-mini/aws/files/modules.conf.in.patch
diff --git a/packer/jambonz-mini/files/modules.conf.patch b/packer/jambonz-mini/aws/files/modules.conf.patch
similarity index 100%
rename from packer/jambonz-mini/files/modules.conf.patch
rename to packer/jambonz-mini/aws/files/modules.conf.patch
diff --git a/packer/jambonz-mini/files/modules.conf.vanilla.xml.extra b/packer/jambonz-mini/aws/files/modules.conf.vanilla.xml.extra
similarity index 100%
rename from packer/jambonz-mini/files/modules.conf.vanilla.xml.extra
rename to packer/jambonz-mini/aws/files/modules.conf.vanilla.xml.extra
diff --git a/packer/jambonz-mini/files/modules.conf.vanilla.xml.grpc b/packer/jambonz-mini/aws/files/modules.conf.vanilla.xml.grpc
similarity index 100%
rename from packer/jambonz-mini/files/modules.conf.vanilla.xml.grpc
rename to packer/jambonz-mini/aws/files/modules.conf.vanilla.xml.grpc
diff --git a/packer/jambonz-mini/files/modules.conf.vanilla.xml.grpc.patch b/packer/jambonz-mini/aws/files/modules.conf.vanilla.xml.grpc.patch
similarity index 100%
rename from packer/jambonz-mini/files/modules.conf.vanilla.xml.grpc.patch
rename to packer/jambonz-mini/aws/files/modules.conf.vanilla.xml.grpc.patch
diff --git a/packer/jambonz-mini/files/modules.conf.vanilla.xml.lws b/packer/jambonz-mini/aws/files/modules.conf.vanilla.xml.lws
similarity index 100%
rename from packer/jambonz-mini/files/modules.conf.vanilla.xml.lws
rename to packer/jambonz-mini/aws/files/modules.conf.vanilla.xml.lws
diff --git a/packer/jambonz-mini/files/modules.conf.vanilla.xml.patch b/packer/jambonz-mini/aws/files/modules.conf.vanilla.xml.patch
similarity index 100%
rename from packer/jambonz-mini/files/modules.conf.vanilla.xml.patch
rename to packer/jambonz-mini/aws/files/modules.conf.vanilla.xml.patch
diff --git a/packer/jambonz-mini/files/modules.conf.xml b/packer/jambonz-mini/aws/files/modules.conf.xml
similarity index 100%
rename from packer/jambonz-mini/files/modules.conf.xml
rename to packer/jambonz-mini/aws/files/modules.conf.xml
diff --git a/packer/jambonz-mini/files/mrf_dialplan.xml b/packer/jambonz-mini/aws/files/mrf_dialplan.xml
similarity index 100%
rename from packer/jambonz-mini/files/mrf_dialplan.xml
rename to packer/jambonz-mini/aws/files/mrf_dialplan.xml
diff --git a/packer/jambonz-mini/files/mrf_sip_profile.xml b/packer/jambonz-mini/aws/files/mrf_sip_profile.xml
similarity index 100%
rename from packer/jambonz-mini/files/mrf_sip_profile.xml
rename to packer/jambonz-mini/aws/files/mrf_sip_profile.xml
diff --git a/packer/jambonz-mini/files/mysql-server.key b/packer/jambonz-mini/aws/files/mysql-server.key
similarity index 100%
rename from packer/jambonz-mini/files/mysql-server.key
rename to packer/jambonz-mini/aws/files/mysql-server.key
diff --git a/packer/jambonz-mini/files/nginx-badbots.filter b/packer/jambonz-mini/aws/files/nginx-badbots.filter
similarity index 100%
rename from packer/jambonz-mini/files/nginx-badbots.filter
rename to packer/jambonz-mini/aws/files/nginx-badbots.filter
diff --git a/packer/jambonz-mini/files/nginx-badbots.jail b/packer/jambonz-mini/aws/files/nginx-badbots.jail
similarity index 100%
rename from packer/jambonz-mini/files/nginx-badbots.jail
rename to packer/jambonz-mini/aws/files/nginx-badbots.jail
diff --git a/packer/jambonz-mini/files/nginx-nohome.jail b/packer/jambonz-mini/aws/files/nginx-nohome.jail
similarity index 100%
rename from packer/jambonz-mini/files/nginx-nohome.jail
rename to packer/jambonz-mini/aws/files/nginx-nohome.jail
diff --git a/packer/jambonz-mini/files/nginx-noproxy.filter b/packer/jambonz-mini/aws/files/nginx-noproxy.filter
similarity index 100%
rename from packer/jambonz-mini/files/nginx-noproxy.filter
rename to packer/jambonz-mini/aws/files/nginx-noproxy.filter
diff --git a/packer/jambonz-mini/files/nginx-noproxy.jail b/packer/jambonz-mini/aws/files/nginx-noproxy.jail
similarity index 100%
rename from packer/jambonz-mini/files/nginx-noproxy.jail
rename to packer/jambonz-mini/aws/files/nginx-noproxy.jail
diff --git a/packer/jambonz-mini/files/nginx-noscript.filter b/packer/jambonz-mini/aws/files/nginx-noscript.filter
similarity index 100%
rename from packer/jambonz-mini/files/nginx-noscript.filter
rename to packer/jambonz-mini/aws/files/nginx-noscript.filter
diff --git a/packer/jambonz-mini/files/nginx-noscript.jail b/packer/jambonz-mini/aws/files/nginx-noscript.jail
similarity index 100%
rename from packer/jambonz-mini/files/nginx-noscript.jail
rename to packer/jambonz-mini/aws/files/nginx-noscript.jail
diff --git a/packer/jambonz-mini/files/nginx.api b/packer/jambonz-mini/aws/files/nginx.api
similarity index 100%
rename from packer/jambonz-mini/files/nginx.api
rename to packer/jambonz-mini/aws/files/nginx.api
diff --git a/packer/jambonz-mini/files/nginx.default b/packer/jambonz-mini/aws/files/nginx.default
similarity index 100%
rename from packer/jambonz-mini/files/nginx.default
rename to packer/jambonz-mini/aws/files/nginx.default
diff --git a/packer/jambonz-mini/files/nginx.grafana b/packer/jambonz-mini/aws/files/nginx.grafana
similarity index 100%
rename from packer/jambonz-mini/files/nginx.grafana
rename to packer/jambonz-mini/aws/files/nginx.grafana
diff --git a/packer/jambonz-mini/files/nginx.homer b/packer/jambonz-mini/aws/files/nginx.homer
similarity index 100%
rename from packer/jambonz-mini/files/nginx.homer
rename to packer/jambonz-mini/aws/files/nginx.homer
diff --git a/packer/jambonz-mini/files/nginx.public-apps b/packer/jambonz-mini/aws/files/nginx.public-apps
similarity index 100%
rename from packer/jambonz-mini/files/nginx.public-apps
rename to packer/jambonz-mini/aws/files/nginx.public-apps
diff --git a/packer/jambonz-mini/files/rtpengine-recording.ini b/packer/jambonz-mini/aws/files/rtpengine-recording.ini
similarity index 100%
rename from packer/jambonz-mini/files/rtpengine-recording.ini
rename to packer/jambonz-mini/aws/files/rtpengine-recording.ini
diff --git a/packer/jambonz-mini/files/rtpengine-recording.service b/packer/jambonz-mini/aws/files/rtpengine-recording.service
similarity index 100%
rename from packer/jambonz-mini/files/rtpengine-recording.service
rename to packer/jambonz-mini/aws/files/rtpengine-recording.service
diff --git a/packer/jambonz-mini/files/rtpengine.gcp.service b/packer/jambonz-mini/aws/files/rtpengine.gcp.service
similarity index 100%
rename from packer/jambonz-mini/files/rtpengine.gcp.service
rename to packer/jambonz-mini/aws/files/rtpengine.gcp.service
diff --git a/packer/jambonz-mini/files/rtpengine.service b/packer/jambonz-mini/aws/files/rtpengine.service
similarity index 100%
rename from packer/jambonz-mini/files/rtpengine.service
rename to packer/jambonz-mini/aws/files/rtpengine.service
diff --git a/packer/jambonz-mini/files/switch.conf.xml b/packer/jambonz-mini/aws/files/switch.conf.xml
similarity index 100%
rename from packer/jambonz-mini/files/switch.conf.xml
rename to packer/jambonz-mini/aws/files/switch.conf.xml
diff --git a/packer/jambonz-mini/files/switch_core_media.c.patch b/packer/jambonz-mini/aws/files/switch_core_media.c.patch
similarity index 100%
rename from packer/jambonz-mini/files/switch_core_media.c.patch
rename to packer/jambonz-mini/aws/files/switch_core_media.c.patch
diff --git a/packer/jambonz-mini/files/switch_rtp.c.patch b/packer/jambonz-mini/aws/files/switch_rtp.c.patch
similarity index 100%
rename from packer/jambonz-mini/files/switch_rtp.c.patch
rename to packer/jambonz-mini/aws/files/switch_rtp.c.patch
diff --git a/packer/jambonz-mini/files/telegraf.conf b/packer/jambonz-mini/aws/files/telegraf.conf
similarity index 100%
rename from packer/jambonz-mini/files/telegraf.conf
rename to packer/jambonz-mini/aws/files/telegraf.conf
diff --git a/packer/jambonz-mini/files/vanilla_modules.conf.xml.patch b/packer/jambonz-mini/aws/files/vanilla_modules.conf.xml.patch
similarity index 100%
rename from packer/jambonz-mini/files/vanilla_modules.conf.xml.patch
rename to packer/jambonz-mini/aws/files/vanilla_modules.conf.xml.patch
diff --git a/packer/jambonz-mini/files/vimrc.local b/packer/jambonz-mini/aws/files/vimrc.local
similarity index 100%
rename from packer/jambonz-mini/files/vimrc.local
rename to packer/jambonz-mini/aws/files/vimrc.local
diff --git a/packer/jambonz-mini/scripts/install_apiban.sh b/packer/jambonz-mini/aws/scripts/install_apiban.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_apiban.sh
rename to packer/jambonz-mini/aws/scripts/install_apiban.sh
diff --git a/packer/jambonz-mini/aws/scripts/install_app.sh b/packer/jambonz-mini/aws/scripts/install_app.sh
new file mode 100755
index 0000000..a006af9
--- /dev/null
+++ b/packer/jambonz-mini/aws/scripts/install_app.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+VERSION=$1
+DB_USER=$2
+DB_PASS=$3
+
+cd /home/admin
+cp /tmp/ecosystem.config.js apps
+
+echo "building jambonz-feature-server.."
+cd /home/admin/apps/jambonz-feature-server && npm ci --unsafe-perm
+echo "building fsw-clear-old-calls.."
+cd /home/admin/apps/fsw-clear-old-calls && npm ci --unsafe-perm && sudo npm install -g .
+echo "building jambonz-api-server.."
+cd /home/admin/apps/jambonz-api-server && npm ci --unsafe-perm
+echo "building jambonz-webapp.."
+cd /home/admin/apps/jambonz-webapp && npm ci --unsafe-perm && npm run build
+echo "building sbc-sip-sidecar.."
+cd /home/admin/apps/sbc-sip-sidecar && npm ci --unsafe-perm
+echo "building sbc-inbound.."
+cd /home/admin/apps/sbc-inbound && npm ci --unsafe-perm
+echo "building sbc-outbound.."
+cd /home/admin/apps/sbc-outbound && npm ci --unsafe-perm
+echo "building sbc-call-router.."
+cd /home/admin/apps/sbc-call-router && npm ci --unsafe-perm
+echo "building jambonz-smpp-esme.."
+cd /home/admin/apps/jambonz-smpp-esme && npm ci --unsafe-perm
+echo "building sbc-rtpengine-sidecar.."
+cd /home/admin/apps/sbc-rtpengine-sidecar && npm ci --unsafe-perm
+
+sudo npm install -g pino-pretty pm2 pm2-logrotate gulp grunt
+sudo pm2 install pm2-logrotate
+
+echo "0 * * * * root fsw-clear-old-calls --password JambonzR0ck$ >> /var/log/fsw-clear-old-calls.log 2>&1" | sudo tee -a /etc/crontab
+echo "0 1 * * * root find /tmp -name \"*.mp3\" -mtime +2 -exec rm {} \; > /dev/null 2>&1" | sudo tee -a /etc/crontab
+
+sudo -u admin bash -c "pm2 install pm2-logrotate"
+sudo -u admin bash -c "pm2 set pm2-logrotate:max_size 1G"
+sudo -u admin bash -c "pm2 set pm2-logrotate:retain 5"
+sudo -u admin bash -c "pm2 set pm2-logrotate:compress true"
+
+sudo chown -R admin:admin /home/admin/apps
+
+sudo rm /home/admin/apps/jambonz-webapp/.env
+
+sudo snap install core
+sudo snap install --classic certbot
+sudo rm /usr/bin/certbot
+sudo ln -s /snap/bin/certbot /usr/bin/certbot
diff --git a/packer/jambonz-mini/scripts/install_chrony.sh b/packer/jambonz-mini/aws/scripts/install_chrony.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_chrony.sh
rename to packer/jambonz-mini/aws/scripts/install_chrony.sh
diff --git a/packer/jambonz-mini/scripts/install_cloudwatch.sh b/packer/jambonz-mini/aws/scripts/install_cloudwatch.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_cloudwatch.sh
rename to packer/jambonz-mini/aws/scripts/install_cloudwatch.sh
diff --git a/packer/jambonz-mini/scripts/install_drachtio.sh b/packer/jambonz-mini/aws/scripts/install_drachtio.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_drachtio.sh
rename to packer/jambonz-mini/aws/scripts/install_drachtio.sh
diff --git a/packer/jambonz-mini/scripts/install_fail2ban.sh b/packer/jambonz-mini/aws/scripts/install_fail2ban.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_fail2ban.sh
rename to packer/jambonz-mini/aws/scripts/install_fail2ban.sh
diff --git a/packer/jambonz-mini/scripts/install_freeswitch.sh b/packer/jambonz-mini/aws/scripts/install_freeswitch.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_freeswitch.sh
rename to packer/jambonz-mini/aws/scripts/install_freeswitch.sh
diff --git a/packer/jambonz-mini/scripts/install_grafana.sh b/packer/jambonz-mini/aws/scripts/install_grafana.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_grafana.sh
rename to packer/jambonz-mini/aws/scripts/install_grafana.sh
diff --git a/packer/jambonz-mini/scripts/install_homer.sh b/packer/jambonz-mini/aws/scripts/install_homer.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_homer.sh
rename to packer/jambonz-mini/aws/scripts/install_homer.sh
diff --git a/packer/jambonz-mini/scripts/install_influxdb.sh b/packer/jambonz-mini/aws/scripts/install_influxdb.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_influxdb.sh
rename to packer/jambonz-mini/aws/scripts/install_influxdb.sh
diff --git a/packer/jambonz-mini/scripts/install_jaeger.sh b/packer/jambonz-mini/aws/scripts/install_jaeger.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_jaeger.sh
rename to packer/jambonz-mini/aws/scripts/install_jaeger.sh
diff --git a/packer/jambonz-mini/scripts/install_mysql.sh b/packer/jambonz-mini/aws/scripts/install_mysql.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_mysql.sh
rename to packer/jambonz-mini/aws/scripts/install_mysql.sh
diff --git a/packer/jambonz-mini/scripts/install_nginx.sh b/packer/jambonz-mini/aws/scripts/install_nginx.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_nginx.sh
rename to packer/jambonz-mini/aws/scripts/install_nginx.sh
diff --git a/packer/jambonz-mini/scripts/install_node_red.sh b/packer/jambonz-mini/aws/scripts/install_node_red.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_node_red.sh
rename to packer/jambonz-mini/aws/scripts/install_node_red.sh
diff --git a/packer/jambonz-mini/scripts/install_nodejs.sh b/packer/jambonz-mini/aws/scripts/install_nodejs.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_nodejs.sh
rename to packer/jambonz-mini/aws/scripts/install_nodejs.sh
diff --git a/packer/jambonz-mini/scripts/install_os_tuning.sh b/packer/jambonz-mini/aws/scripts/install_os_tuning.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_os_tuning.sh
rename to packer/jambonz-mini/aws/scripts/install_os_tuning.sh
diff --git a/packer/jambonz-mini/scripts/install_postgresql.sh b/packer/jambonz-mini/aws/scripts/install_postgresql.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_postgresql.sh
rename to packer/jambonz-mini/aws/scripts/install_postgresql.sh
diff --git a/packer/jambonz-mini/scripts/install_redis.sh b/packer/jambonz-mini/aws/scripts/install_redis.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_redis.sh
rename to packer/jambonz-mini/aws/scripts/install_redis.sh
diff --git a/packer/jambonz-mini/scripts/install_rtpengine.sh b/packer/jambonz-mini/aws/scripts/install_rtpengine.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_rtpengine.sh
rename to packer/jambonz-mini/aws/scripts/install_rtpengine.sh
diff --git a/packer/jambonz-mini/scripts/install_telegraf.sh b/packer/jambonz-mini/aws/scripts/install_telegraf.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_telegraf.sh
rename to packer/jambonz-mini/aws/scripts/install_telegraf.sh
diff --git a/packer/jambonz-mini/template.json b/packer/jambonz-mini/aws/template.json
similarity index 99%
rename from packer/jambonz-mini/template.json
rename to packer/jambonz-mini/aws/template.json
index 3299442..8cb6239 100644
--- a/packer/jambonz-mini/template.json
+++ b/packer/jambonz-mini/aws/template.json
@@ -5,7 +5,7 @@
"ami_description": "jambonz all-in-one AMI",
"instance_type": "c6in.xlarge",
"drachtio_version": "v0.8.22",
- "jambonz_version": "v0.8.3",
+ "jambonz_version": "v0.8.3-3",
"jambonz_user": "admin",
"jambonz_password": "JambonzR0ck$",
"install_telegraf": "yes",
diff --git a/packer/jambonz-mini/gcp/README.md b/packer/jambonz-mini/gcp/README.md
new file mode 100644
index 0000000..7ad3f49
--- /dev/null
+++ b/packer/jambonz-mini/gcp/README.md
@@ -0,0 +1,102 @@
+# packer-jambonz-mini
+
+A [packer](https://www.packer.io/) template to build a machine image containing everything needed to run jambonz on a single GCP Compute Engine instance. The base linux distro is Debian 11 (bullseye).
+
+## Installing
+
+```
+$ packer build -color=false template.json
+```
+
+### variables
+There are many variables that can be specified on the `packer build` command line; however defaults (which are shown below) are appropriate for building an "all in one" jambonz server, so you generally should not need to specify values.
+
+```
+"region": "us-east-1"
+```
+The region to create the AMI in
+
+```
+"ami_description": "EC2 AMI jambonz mini"
+```
+AMI description.
+
+```
+"instance_type": "t2.medium"
+```
+EC2 Instance type to use when building the AMI.
+
+```
+"install_drachtio": "true"
+```
+whether to install drachtio
+
+```
+"install_nodejs": "false",
+```
+whether to install Node.js
+
+```
+"install_rtpengine": "true",
+```
+whether to install rtpengine
+
+```
+"install_freeswitch": "true",
+```
+whether to install freeswitch
+
+```
+"install_drachtio_fail2ban": "true",
+```
+whether to install fail2ban with drachtio filter
+
+```
+"install_redis": "true",
+```
+whether to install redis
+
+```
+"drachtio_version": "v0.8.3"
+```
+drachtio tag or branch to build
+
+```
+"nodejs_version": "v10.16.2",
+```
+Node.js version to install
+
+```
+"freeswitch_bind_cloud_ip": "true"
+```
+If freeswitch is enabled, and cloud_provider is not none then this variable dictates whether freeswitch should bind its sip and rtp ports to the cloud public address (versus the local ipv4 address).
+
+```
+"mod_audio_fork_subprotocol": "audio.jambonz.org"
+```
+websocket subprotocol name used by freeswitch module mod_audio_fork
+
+```
+"mod_audio_fork_service_threads": "3",
+```
+number of libwebsocket service threads used by freeswitch module mod_audio_fork
+
+``
+"mod_audio_fork_buffer_secs": "2",
+```
+max number of seconds of audio to buffer by freeswitch module mod_audio_fork
+
+```
+"freeswitch_build_with_grpc": "true"
+```
+whether to build support for google speech and text-to-speech services
+
+```
+"remove_source": "true"
+```
+whether to remove source build directories, or leave them on the instance
+
+```
+"cloud_provider": "aws"
+```
+Cloud provider the AMI will be built on.
diff --git a/packer/jambonz-mini/cloudbuild.yaml b/packer/jambonz-mini/gcp/cloudbuild.yaml
similarity index 100%
rename from packer/jambonz-mini/cloudbuild.yaml
rename to packer/jambonz-mini/gcp/cloudbuild.yaml
diff --git a/packer/jambonz-mini/gcp/files/20auto-upgrades b/packer/jambonz-mini/gcp/files/20auto-upgrades
new file mode 100644
index 0000000..f066dcb
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/20auto-upgrades
@@ -0,0 +1,2 @@
+APT::Periodic::Update-Package-Lists "0";
+APT::Periodic::Unattended-Upgrade "0";
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/Makefile.am.extra b/packer/jambonz-mini/gcp/files/Makefile.am.extra
new file mode 100644
index 0000000..7ba2f2e
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/Makefile.am.extra
@@ -0,0 +1,1056 @@
+EXTRA_DIST =
+SUBDIRS = . src build tests/unit
+AUTOMAKE_OPTIONS = foreign subdir-objects
+NAME = freeswitch
+
+if SYSTEM_APR
+AM_LIBAPR_CFLAGS := $(shell apr-1-config --cflags)
+AM_LIBAPR_CPPFLAGS := $(shell apr-1-config --cppflags --includes)
+AM_LIBAPR_LDFLAGS := $(shell apr-1-config --ldflags)
+AM_LIBAPR_LIBS := $(shell apr-1-config \--libs)
+AM_LIBAPR_LINKLIBTOOL := $(shell apr-1-config \--link-libtool)
+else
+AM_LIBAPR_CFLAGS := $(shell ./libs/apr/apr-1-config --cflags)
+AM_LIBAPR_CPPFLAGS := $(shell ./libs/apr/apr-1-config --cppflags --includes)
+AM_LIBAPR_LDFLAGS := $(shell ./libs/apr/apr-1-config --ldflags)
+AM_LIBAPR_LIBS := $(subst $(switch_builddir)/,,$(shell ./libs/apr/apr-1-config \--libs))
+endif
+if SYSTEM_APRUTIL
+AM_LIBAPU_CPPFLAGS := $(shell apu-1-config --includes)
+AM_LIBAPU_LDFLAGS := $(shell apu-1-config --ldflags)
+AM_LIBAPU_LIBS := $(shell apu-1-config \--libs)
+AM_LIBAPU_LINKLIBTOOL := $(shell apu-1-config \--link-libtool)
+else
+AM_LIBAPU_CPPFLAGS := $(shell ./libs/apr-util/apu-1-config --includes)
+AM_LIBAPU_LDFLAGS := $(shell ./libs/apr-util/apu-1-config --ldflags)
+AM_LIBAPU_LIBS := $(subst $(switch_builddir)/,,$(shell ./libs/apr-util/apu-1-config \--libs))
+endif
+
+
+AM_CFLAGS = $(SWITCH_AM_CFLAGS) $(SWITCH_ANSI_CFLAGS)
+AM_CPPFLAGS =
+AM_CPPFLAGS += -I$(switch_srcdir)/libs/libvpx
+AM_CPPFLAGS += $(SWITCH_AM_CXXFLAGS)
+AM_LDFLAGS = $(SWITCH_AM_LDFLAGS) $(AM_LIBAPR_LDFLAGS) $(AM_LIBAPU_LDFLAGS)
+
+DEFAULT_SOUNDS=en-us-callie-8000
+MY_DEFAULT_ARGS= --build=$(build) --host=$(host) --target=$(target) --prefix="$(prefix)" --exec_prefix="$(exec_prefix)" --libdir="$(libdir)"
+
+.INTERMEDIATE: -ldl -liconv -lpthread
+
+.DEFAULT: $(switch_builddir)/modules.conf src/mod/modules.inc
+ @target=`echo $@ | sed -e 's|^.*-||'`; \
+ target_prefix=`echo $@ | sed -e 's|-.*$$||'`; \
+ sound_perfix=`echo $@ | sed -e 's|-.*||'`; \
+ moh_version=`cat $(switch_srcdir)/build/moh_version.txt`;\
+ full_sound_dir=`echo $@ | sed -e 's|^sounds||' | sed -e 's|^-||' | sed -e 's|-install$$||'`; \
+ test ! -z $$full_sound_dir || full_sound_dir=`echo $(DEFAULT_SOUNDS)`; \
+ base_sound_dir=`echo $$full_sound_dir | sed -e 's|-[^-]*000$$||' ` ;\
+ sounds_version=`grep $$base_sound_dir $(switch_srcdir)/build/sounds_version.txt | cut -d ' ' -f2`;\
+ soundfile=`echo freeswitch-sounds-$$full_sound_dir-$$moh_version.tar.gz`; \
+ echo $$full_sound_dir | grep music >/dev/null || soundfile=`echo freeswitch-sounds-$$full_sound_dir-$$sounds_version.tar.gz`; \
+ args="$@"; if test -z "$$args" || test "$${args#-l*}" = "$$args"; then \
+ if test "$$target_prefix" = "sounds"; then \
+ if test "$$target" = "install"; then \
+ $(GETSOUNDS) $$soundfile $(DESTDIR)$(soundsdir)/;\
+ else \
+ $(GETSOUNDS) $$soundfile ; \
+ fi; \
+ else \
+ if test "$$target" = "install"; then \
+ $(MAKE) $(AM_MAKEFLAGS) core_install && cd src/mod && $(MAKE) $(AM_MAKEFLAGS) $@ ; \
+ else \
+ if test "$$target" = "clean"; then \
+ cd src/mod && $(MAKE) $(AM_MAKEFLAGS) $@ ;\
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) core && cd src/mod && $(MAKE) $(AM_MAKEFLAGS) $@ ;\
+ fi; \
+ fi; \
+ fi; fi
+
+sounds: sounds-en-us-callie-8000
+sounds-install: sounds-en-us-callie-8000-install
+sounds-allison: sounds-en-us-allison-8000
+sounds-allison-install: sounds-en-us-allison-8000-install
+sounds-ru: sounds-ru-RU-elena-8000
+sounds-ru-install: sounds-ru-RU-elena-8000-install
+sounds-fr: sounds-fr-ca-june-8000
+sounds-fr-install: sounds-fr-ca-june-8000-install
+moh: sounds-music-8000
+moh-install: sounds-music-8000-install
+
+hd-sounds: sounds sounds-en-us-callie-16000
+hd-sounds-install: sounds-install sounds-en-us-callie-16000-install
+hd-sounds-allison: sounds-allison sounds-en-us-allison-16000
+hd-sounds-allison-install: sounds-allison-install sounds-en-us-allison-16000-install
+hd-sounds-ru: sounds-ru sounds-ru-RU-elena-16000
+hd-sounds-ru-install: sounds-ru-install sounds-ru-RU-elena-16000-install
+hd-sounds-fr: sounds-fr-ca-june-16000
+hd-sounds-fr-install: sounds-fr-ca-june-16000-install
+hd-moh: moh sounds-music-16000
+hd-moh-install: moh-install sounds-music-16000-install
+
+uhd-sounds: hd-sounds sounds-en-us-callie-32000
+uhd-sounds-install: hd-sounds-install sounds-en-us-callie-32000-install
+uhd-sounds-allison: hd-sounds-allison sounds-en-us-allison-32000
+uhd-sounds-allison-install: hd-sounds-allison-install sounds-en-us-allison-32000-install
+uhd-sounds-ru: hd-sounds-ru sounds-ru-RU-elena-32000
+uhd-sounds-ru-install: hd-sounds-ru-install sounds-ru-RU-elena-32000-install
+uhd-sounds-fr: sounds-fr-ca-june-32000
+uhd-sounds-fr-install: sounds-fr-ca-june-32000-install
+uhd-moh: hd-moh sounds-music-32000
+uhd-moh-install: hd-moh-install sounds-music-32000-install
+
+cd-sounds: uhd-sounds sounds-en-us-callie-48000
+cd-sounds-install: uhd-sounds-install sounds-en-us-callie-48000-install
+cd-sounds-allison: uhd-sounds-allison sounds-en-us-allison-48000
+cd-sounds-allison-install: uhd-sounds-allison-install sounds-en-us-allison-48000-install
+cd-sounds-ru: uhd-sounds-ru sounds-ru-RU-elena-48000
+cd-sounds-ru-install: uhd-sounds-ru-install sounds-ru-RU-elena-48000-install
+cd-sounds-fr: sounds-fr-ca-june-48000
+cd-sounds-fr-install: sounds-fr-ca-june-48000-install
+cd-moh: uhd-moh sounds-music-48000
+cd-moh-install: uhd-moh-install sounds-music-48000-install
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+# (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+all-recursive: libfreeswitch.la
+clean-recusive: clean_core
+install-recursive: install-libLTLIBRARIES install-binPROGRAMS
+
+CORE_CFLAGS = $(AM_LIBAPR_CFLAGS) $(AM_LIBAPR_CPPFLAGS)
+CORE_CFLAGS += $(AM_LIBAPU_CPPFLAGS)
+CORE_CFLAGS += -I$(switch_srcdir)/libs/srtp/include
+if ENABLE_LIBYUV
+CORE_CFLAGS += -I$(switch_srcdir)/libs/libyuv/include
+CORE_CFLAGS += -DSWITCH_HAVE_YUV
+endif
+CORE_CFLAGS += -I$(switch_srcdir)/libs/srtp/crypto/include -Ilibs/srtp/crypto/include
+CORE_CFLAGS += $(SPANDSP_CFLAGS)
+if ENABLE_LIBVPX
+CORE_CFLAGS += -DSWITCH_HAVE_VPX
+endif
+
+APR_LIBS = $(AM_LIBAPU_LIBS) $(AM_LIBAPR_LIBS)
+CORE_LIBS=
+
+if ENABLE_LIBVPX
+CORE_LIBS += libs/libvpx/libvpx.a
+endif
+if SYSTEM_APRUTIL
+CORE_LIBS += $(AM_LIBAPU_LINKLIBTOOL)
+else
+CORE_LIBS += libs/apr-util/libaprutil-1.la
+endif
+if SYSTEM_APR
+CORE_LIBS += $(AM_LIBAPR_LINKLIBTOOL)
+else
+CORE_LIBS += libs/apr/libapr-1.la
+endif
+
+if ENABLE_SRTP
+CORE_CFLAGS += -DENABLE_SRTP
+CORE_LIBS += libs/srtp/libsrtp.la
+endif
+
+MOD_LINK = $(switch_srcdir)/libfreeswitch.la
+CLEANFILES = src/include/switch_version.h src/include/switch_swigable_cpp.h
+BUILT_SOURCES = src/mod/modules.inc src/include/switch_version.h src/include/switch_swigable_cpp.h
+
+if HAVE_ODBC
+CORE_CFLAGS += -DSWITCH_HAVE_ODBC $(ODBC_INC_FLAGS)
+endif
+
+if HAVE_PNG
+CORE_CFLAGS += -DSWITCH_HAVE_PNG $(LIBPNG_CFLAGS)
+endif
+
+if HAVE_FREETYPE
+CORE_CFLAGS += -DSWITCH_HAVE_FREETYPE $(LIBFREETYPE_CFLAGS)
+endif
+
+if HAVE_GUMBO
+CORE_CFLAGS += -DSWITCH_HAVE_GUMBO $(LIBGUMBO_CFLAGS)
+endif
+
+if HAVE_FVAD
+CORE_CFLAGS += -DSWITCH_HAVE_FVAD $(LIBFVAD_CFLAGS)
+endif
+
+# DH: LWS
+if HAVE_LWS
+CORE_CFLAGS += -DSWITCH_HAVE_LWS $(LWS_CFLAGS)
+LWS_LIBS += -lwebsockets
+endif
+
+# DH: GRPC
+if HAVE_GRPC
+CORE_CFLAGS += -DSWITCH_HAVE_GRPC $(GRPC_CFLAGS)
+GRPC_LIBS += -lgrpc++_reflection -lprotobuf
+endif
+
+##
+## libfreeswitch
+##
+noinst_LTLIBRARIES =
+if ENABLE_LIBYUV
+noinst_LTLIBRARIES += libfreeswitch_libyuv.la
+endif
+
+if ENABLE_LIBYUV
+libfreeswitch_libyuv_la_SOURCES = \
+libs/libyuv/source/compare.cc \
+libs/libyuv/source/compare_common.cc \
+libs/libyuv/source/compare_gcc.cc \
+libs/libyuv/source/compare_mmi.cc \
+libs/libyuv/source/compare_msa.cc \
+libs/libyuv/source/compare_neon64.cc \
+libs/libyuv/source/compare_neon.cc \
+libs/libyuv/source/compare_win.cc \
+libs/libyuv/source/convert_argb.cc \
+libs/libyuv/source/convert.cc \
+libs/libyuv/source/convert_from_argb.cc \
+libs/libyuv/source/convert_from.cc \
+libs/libyuv/source/convert_jpeg.cc \
+libs/libyuv/source/convert_to_argb.cc \
+libs/libyuv/source/convert_to_i420.cc \
+libs/libyuv/source/cpu_id.cc \
+libs/libyuv/source/mjpeg_decoder.cc \
+libs/libyuv/source/mjpeg_validate.cc \
+libs/libyuv/source/planar_functions.cc \
+libs/libyuv/source/rotate_any.cc \
+libs/libyuv/source/rotate_argb.cc \
+libs/libyuv/source/rotate.cc \
+libs/libyuv/source/rotate_common.cc \
+libs/libyuv/source/rotate_gcc.cc \
+libs/libyuv/source/rotate_mmi.cc \
+libs/libyuv/source/rotate_msa.cc \
+libs/libyuv/source/rotate_neon64.cc \
+libs/libyuv/source/rotate_neon.cc \
+libs/libyuv/source/rotate_win.cc \
+libs/libyuv/source/row_any.cc \
+libs/libyuv/source/row_common.cc \
+libs/libyuv/source/row_gcc.cc \
+libs/libyuv/source/row_mmi.cc \
+libs/libyuv/source/row_msa.cc \
+libs/libyuv/source/row_neon64.cc \
+libs/libyuv/source/row_neon.cc \
+libs/libyuv/source/row_win.cc \
+libs/libyuv/source/scale_any.cc \
+libs/libyuv/source/scale_argb.cc \
+libs/libyuv/source/scale.cc \
+libs/libyuv/source/scale_common.cc \
+libs/libyuv/source/scale_gcc.cc \
+libs/libyuv/source/scale_mmi.cc \
+libs/libyuv/source/scale_msa.cc \
+libs/libyuv/source/scale_neon64.cc \
+libs/libyuv/source/scale_neon.cc \
+libs/libyuv/source/scale_win.cc \
+libs/libyuv/source/video_common.cc
+
+
+libfreeswitch_libyuv_la_CPPFLAGS = -O2 -fomit-frame-pointer -Ilibs/libyuv/include
+CORE_LIBS+=libfreeswitch_libyuv.la
+endif
+
+if HAVE_GRPC
+GOOGLEAPIS_GENS_PATH = libs/googleapis/gens
+
+nodist_libfreeswitch_libgoogleapis_la_SOURCES = \
+libs/googleapis/gens/google/api/monitoring.grpc.pb.cc \
+libs/googleapis/gens/google/api/annotations.grpc.pb.cc \
+libs/googleapis/gens/google/api/http.pb.cc \
+libs/googleapis/gens/google/api/quota.pb.cc \
+libs/googleapis/gens/google/api/quota.grpc.pb.cc \
+libs/googleapis/gens/google/api/backend.grpc.pb.cc \
+libs/googleapis/gens/google/api/service.grpc.pb.cc \
+libs/googleapis/gens/google/api/monitored_resource.pb.cc \
+libs/googleapis/gens/google/api/consumer.pb.cc \
+libs/googleapis/gens/google/api/annotations.pb.cc \
+libs/googleapis/gens/google/api/metric.pb.cc \
+libs/googleapis/gens/google/api/logging.pb.cc \
+libs/googleapis/gens/google/api/auth.grpc.pb.cc \
+libs/googleapis/gens/google/api/distribution.grpc.pb.cc \
+libs/googleapis/gens/google/api/label.grpc.pb.cc \
+libs/googleapis/gens/google/api/launch_stage.grpc.pb.cc \
+libs/googleapis/gens/google/api/launch_stage.pb.cc \
+libs/googleapis/gens/google/api/httpbody.grpc.pb.cc \
+libs/googleapis/gens/google/api/config_change.grpc.pb.cc \
+libs/googleapis/gens/google/api/logging.grpc.pb.cc \
+libs/googleapis/gens/google/api/context.pb.cc \
+libs/googleapis/gens/google/api/system_parameter.pb.cc \
+libs/googleapis/gens/google/api/distribution.pb.cc \
+libs/googleapis/gens/google/api/control.pb.cc \
+libs/googleapis/gens/google/api/consumer.grpc.pb.cc \
+libs/googleapis/gens/google/api/label.pb.cc \
+libs/googleapis/gens/google/api/documentation.pb.cc \
+libs/googleapis/gens/google/api/log.pb.cc \
+libs/googleapis/gens/google/api/usage.grpc.pb.cc \
+libs/googleapis/gens/google/api/backend.pb.cc \
+libs/googleapis/gens/google/api/control.grpc.pb.cc \
+libs/googleapis/gens/google/api/log.grpc.pb.cc \
+libs/googleapis/gens/google/api/source_info.grpc.pb.cc \
+libs/googleapis/gens/google/api/billing.pb.cc \
+libs/googleapis/gens/google/api/auth.pb.cc \
+libs/googleapis/gens/google/api/resource.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/service_controller.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/check_error.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/check_error.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/distribution.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/quota_controller.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/metric_value.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/distribution.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/http_request.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/log_entry.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/service_controller.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/metric_value.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/log_entry.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/operation.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/quota_controller.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/operation.pb.cc \
+libs/googleapis/gens/google/api/metric.grpc.pb.cc \
+libs/googleapis/gens/google/api/monitored_resource.grpc.pb.cc \
+libs/googleapis/gens/google/api/http.grpc.pb.cc \
+libs/googleapis/gens/google/api/httpbody.pb.cc \
+libs/googleapis/gens/google/api/endpoint.pb.cc \
+libs/googleapis/gens/google/api/documentation.grpc.pb.cc \
+libs/googleapis/gens/google/api/system_parameter.grpc.pb.cc \
+libs/googleapis/gens/google/api/endpoint.grpc.pb.cc \
+libs/googleapis/gens/google/api/service.pb.cc \
+libs/googleapis/gens/google/api/source_info.pb.cc \
+libs/googleapis/gens/google/api/servicemanagement/v1/resources.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicemanagement/v1/servicemanager.pb.cc \
+libs/googleapis/gens/google/api/servicemanagement/v1/resources.pb.cc \
+libs/googleapis/gens/google/api/servicemanagement/v1/servicemanager.grpc.pb.cc \
+libs/googleapis/gens/google/api/billing.grpc.pb.cc \
+libs/googleapis/gens/google/api/usage.pb.cc \
+libs/googleapis/gens/google/api/config_change.pb.cc \
+libs/googleapis/gens/google/api/context.grpc.pb.cc \
+libs/googleapis/gens/google/api/monitoring.pb.cc \
+libs/googleapis/gens/google/api/field_behavior.pb.cc \
+libs/googleapis/gens/google/api/client.pb.cc \
+libs/googleapis/gens/google/rpc/error_details.grpc.pb.cc \
+libs/googleapis/gens/google/rpc/code.pb.cc \
+libs/googleapis/gens/google/rpc/status.pb.cc \
+libs/googleapis/gens/google/rpc/status.grpc.pb.cc \
+libs/googleapis/gens/google/rpc/error_details.pb.cc \
+libs/googleapis/gens/google/rpc/code.grpc.pb.cc \
+libs/googleapis/gens/google/longrunning/operations.grpc.pb.cc \
+libs/googleapis/gens/google/longrunning/operations.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1/cloud_speech.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1/cloud_speech.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1p1beta1/cloud_speech.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1p1beta1/cloud_speech.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1p1beta1/resource.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1p1beta1/resource.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/texttospeech/v1/cloud_tts.pb.cc \
+libs/googleapis/gens/google/cloud/texttospeech/v1/cloud_tts.grpc.pb.cc \
+libs/googleapis/gens/google/logging/type/http_request.grpc.pb.cc \
+libs/googleapis/gens/google/logging/type/log_severity.grpc.pb.cc \
+libs/googleapis/gens/google/logging/type/log_severity.pb.cc \
+libs/googleapis/gens/google/logging/type/http_request.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging_metrics.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging.grpc.pb.cc \
+libs/googleapis/gens/google/logging/v2/log_entry.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging_config.grpc.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging_config.pb.cc \
+libs/googleapis/gens/google/logging/v2/log_entry.grpc.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging_metrics.grpc.pb.cc \
+libs/googleapis/gens/google/type/date.grpc.pb.cc \
+libs/googleapis/gens/google/type/timeofday.pb.cc \
+libs/googleapis/gens/google/type/latlng.grpc.pb.cc \
+libs/googleapis/gens/google/type/money.pb.cc \
+libs/googleapis/gens/google/type/date.pb.cc \
+libs/googleapis/gens/google/type/postal_address.grpc.pb.cc \
+libs/googleapis/gens/google/type/dayofweek.grpc.pb.cc \
+libs/googleapis/gens/google/type/dayofweek.pb.cc \
+libs/googleapis/gens/google/type/timeofday.grpc.pb.cc \
+libs/googleapis/gens/google/type/color.pb.cc \
+libs/googleapis/gens/google/type/postal_address.pb.cc \
+libs/googleapis/gens/google/type/latlng.pb.cc \
+libs/googleapis/gens/google/type/color.grpc.pb.cc \
+libs/googleapis/gens/google/type/money.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/gcs.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/environment.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/fulfillment.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/validation_result.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/agent.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/agent.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/audio_config.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/audio_config.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/context.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/context.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/document.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/document.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/entity_type.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/entity_type.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/intent.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/intent.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/knowledge_base.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/knowledge_base.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session_entity_type.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session_entity_type.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/webhook.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/webhook.pb.cc
+
+libfreeswitch_libgoogleapis_la_CPPFLAGS = -I/usr/local/include -I$(GOOGLEAPIS_GENS_PATH) -std=c++17 -pthread
+
+# nuance asr
+NUANCE_GENS_PATH = libs/nuance-asr-grpc-api/stubs/
+
+nodist_libfreeswitch_libnuanceapis_la_SOURCES = \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/error_details.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/status_code.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/status.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/error_details.grpc.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/status_code.grpc.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/status.grpc.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/result.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/resource.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/recognizer.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/result.grpc.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/resource.grpc.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/recognizer.grpc.pb.cc
+
+libfreeswitch_libnuanceapis_la_CPPFLAGS = -I/usr/local/include -I$(NUANCE_GENS_PATH) -std=c++17 -pthread
+
+#nvidia asr
+NVIDIA_GENS_PATH = libs/riva-asr-grpc-api/stubs/
+
+nodist_libfreeswitch_libnvidiaapis_la_SOURCES = \
+libs/riva-asr-grpc-api/stubs/riva/proto/riva_asr.pb.cc \
+libs/riva-asr-grpc-api/stubs/riva/proto/riva_audio.pb.cc \
+libs/riva-asr-grpc-api/stubs/riva/proto/riva_asr.grpc.pb.cc \
+libs/riva-asr-grpc-api/stubs/riva/proto/riva_audio.grpc.pb.cc
+
+libfreeswitch_libnvidiaapis_la_CPPFLAGS = -I/usr/local/include -I$(NVIDIA_GENS_PATH) -std=c++17 -pthread
+
+#soniox asr
+SONIOX_GENS_PATH = libs/soniox-asr-grpc-api/stubs/
+
+nodist_libfreeswitch_libsonioxapis_la_SOURCES = \
+libs/soniox-asr-grpc-api/stubs/soniox/speech_service.pb.cc \
+libs/soniox-asr-grpc-api/stubs/soniox/speech_service.grpc.pb.cc
+
+libfreeswitch_libsonioxapis_la_CPPFLAGS = -I/usr/local/include -I$(SONIOX_GENS_PATH) -std=c++17 -pthread
+
+CORE_LIBS+=libfreeswitch_libgoogleapis.la libfreeswitch_libnuanceapis.la libfreeswitch_libnvidiaapis.la libfreeswitch_libsonioxapis.la
+noinst_LTLIBRARIES += libfreeswitch_libgoogleapis.la libfreeswitch_libnuanceapis.la libfreeswitch_libnvidiaapis.la libfreeswitch_libsonioxapis.la
+
+endif
+
+lib_LTLIBRARIES = libfreeswitch.la
+libfreeswitch_la_CFLAGS = $(CORE_CFLAGS) $(SQLITE_CFLAGS) $(GUMBO_CFLAGS) $(FVAD_CFLAGS) $(FREETYPE_CFLAGS) $(CURL_CFLAGS) $(PCRE_CFLAGS) $(SPEEX_CFLAGS) $(LIBEDIT_CFLAGS) $(openssl_CFLAGS) $(SOFIA_SIP_CFLAGS) $(AM_CFLAGS) $(TPL_CFLAGS)
+libfreeswitch_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS) $(PLATFORM_CORE_LDFLAGS) -no-undefined
+libfreeswitch_la_LIBADD = $(CORE_LIBS) $(APR_LIBS) $(LWS_LIBS) $(SQLITE_LIBS) $(GUMBO_LIBS) $(FVAD_LIBS) $(FREETYPE_LIBS) $(CURL_LIBS) $(PCRE_LIBS) $(SPEEX_LIBS) $(LIBEDIT_LIBS) $(openssl_LIBS) $(GRPC_LIBS) $(PLATFORM_CORE_LIBS) $(TPL_LIBS) $(SPANDSP_LIBS) $(SOFIA_SIP_LIBS)
+libfreeswitch_la_DEPENDENCIES = $(BUILT_SOURCES)
+
+if HAVE_PNG
+libfreeswitch_la_LIBADD += $(LIBPNG_LIBS)
+endif
+
+if HAVE_ODBC
+libfreeswitch_la_LDFLAGS += $(ODBC_LIB_FLAGS)
+endif
+
+if ENABLE_ZRTP
+CORE_CFLAGS += -I$(switch_srcdir)/libs/libzrtp/third_party/bgaes
+CORE_CFLAGS += -I$(switch_srcdir)/libs/libzrtp/third_party/bnlib
+CORE_CFLAGS += -isystem $(switch_srcdir)/libs/libzrtp/include
+ZRTP_LDFLAGS = -L$(switch_srcdir)/libs/libzrtp/third_party/bnlib
+ZRTP_LDFLAGS += -L$(switch_srcdir)/libs/libzrtp
+ZRTP_LIBS = -lbn -lzrtp
+libfreeswitch_la_LDFLAGS += $(ZRTP_LDFLAGS)
+libfreeswitch_la_LIBADD += $(ZRTP_LIBS)
+CORE_LIBS += libs/libzrtp/libzrtp.a
+LIBS += libs/libzrtp/third_party/bnlib/libbn.a
+endif
+
+library_includetestdir = $(includedir)/test
+library_includetest_HEADERS = \
+ src/include/test/switch_fct.h \
+ src/include/test/switch_test.h
+
+library_includedir = $(includedir)
+library_include_HEADERS = \
+ src/include/switch_am_config.h \
+ src/include/switch.h \
+ src/include/switch_apr.h \
+ src/include/switch_buffer.h \
+ src/include/switch_caller.h \
+ src/include/switch_channel.h \
+ src/include/switch_console.h \
+ src/include/switch_core_event_hook.h \
+ src/include/switch_scheduler.h \
+ src/include/switch_core.h \
+ src/include/switch_core_media.h \
+ src/include/switch_core_video.h \
+ src/include/switch_core_db.h \
+ src/include/switch_mprintf.h \
+ src/include/switch_config.h \
+ src/include/switch_event.h \
+ src/include/switch_frame.h \
+ src/include/switch_ivr.h \
+ src/include/switch_dso.h \
+ src/include/switch_loadable_module.h \
+ src/include/switch_module_interfaces.h \
+ src/include/switch_platform.h \
+ src/include/switch_resample.h \
+ src/include/switch_regex.h \
+ src/include/switch_types.h \
+ src/include/switch_utils.h \
+ src/include/switch_rtp.h \
+ src/include/switch_jitterbuffer.h \
+ src/include/switch_estimators.h \
+ src/include/switch_rtcp_frame.h \
+ src/include/switch_stun.h \
+ src/include/switch_nat.h \
+ src/include/switch_log.h \
+ src/include/switch_xml.h \
+ src/include/switch_xml_config.h \
+ src/include/switch_cpp.h \
+ src/include/switch_curl.h \
+ src/include/switch_cJSON.h \
+ src/include/switch_cJSON_Utils.h \
+ src/include/switch_json.h \
+ src/include/switch_utf8.h \
+ src/include/switch_msrp.h \
+ src/include/switch_vpx.h \
+ src/include/switch_vad.h \
+ libs/libteletone/src/libteletone_detect.h \
+ libs/libteletone/src/libteletone_generate.h \
+ libs/libteletone/src/libteletone.h \
+ src/include/switch_limit.h \
+ src/include/switch_odbc.h \
+ src/include/switch_hashtable.h \
+ src/include/switch_image.h
+
+nodist_libfreeswitch_la_SOURCES = \
+ src/include/switch_frame.h \
+ src/include/switch_swigable_cpp.h \
+ src/include/switch_version.h
+
+libfreeswitch_la_SOURCES = \
+ src/switch_apr.c \
+ src/switch_buffer.c \
+ src/switch_caller.c \
+ src/switch_channel.c \
+ src/switch_console.c \
+ src/switch_mprintf.c \
+ src/switch_core_media_bug.c \
+ src/switch_core_timer.c \
+ src/switch_core_asr.c \
+ src/switch_core_event_hook.c \
+ src/switch_core_speech.c \
+ src/switch_core_memory.c \
+ src/switch_core_codec.c \
+ src/switch_core_file.c \
+ src/switch_core_cert.c \
+ src/switch_core_hash.c \
+ src/switch_core_sqldb.c \
+ src/switch_core_session.c \
+ src/switch_core_directory.c \
+ src/switch_core_state_machine.c \
+ src/switch_core_io.c \
+ src/switch_core_rwlock.c \
+ src/switch_core_port_allocator.c \
+ src/switch_core.c \
+ src/switch_version.c \
+ src/switch_core_media.c \
+ src/switch_core_video.c \
+ src/switch_sdp.c \
+ src/switch_scheduler.c \
+ src/switch_core_db.c \
+ src/switch_dso.c \
+ src/switch_loadable_module.c \
+ src/switch_utils.c \
+ src/switch_event.c \
+ src/switch_resample.c \
+ src/switch_regex.c \
+ src/switch_rtp.c \
+ src/switch_jitterbuffer.c \
+ src/switch_estimators.c \
+ src/switch_ivr_bridge.c \
+ src/switch_ivr_originate.c \
+ src/switch_ivr_async.c \
+ src/switch_ivr_play_say.c \
+ src/switch_ivr_say.c \
+ src/switch_ivr_menu.c \
+ src/switch_ivr.c \
+ src/switch_stun.c \
+ src/switch_nat.c \
+ src/switch_log.c \
+ src/switch_xml.c \
+ src/switch_xml_config.c \
+ src/switch_config.c \
+ src/switch_time.c \
+ src/switch_odbc.c \
+ src/switch_limit.c \
+ src/g711.c \
+ src/switch_pcm.c \
+ src/switch_speex.c \
+ src/switch_profile.c \
+ src/cJSON.c \
+ src/cJSON_Utils.c \
+ src/switch_json.c \
+ src/switch_curl.c \
+ src/switch_hashtable.c\
+ src/switch_utf8.c \
+ src/switch_msrp.c \
+ src/switch_vad.c \
+ src/switch_vpx.c \
+ libs/libteletone/src/libteletone_detect.c \
+ libs/libteletone/src/libteletone_generate.c \
+ libs/miniupnpc/miniwget.c \
+ libs/miniupnpc/minixml.c \
+ libs/miniupnpc/igd_desc_parse.c \
+ libs/miniupnpc/minisoap.c \
+ libs/miniupnpc/miniupnpc.c \
+ libs/miniupnpc/upnpreplyparse.c \
+ libs/miniupnpc/upnpcommands.c \
+ libs/miniupnpc/minissdpc.c \
+ libs/miniupnpc/upnperrors.c \
+ libs/libnatpmp/natpmp.c \
+ libs/libnatpmp/getgateway.c
+
+if ENABLE_CPP
+libfreeswitch_la_SOURCES += src/switch_cpp.cpp
+endif
+
+$(libfreeswitch_la_SOURCES): $(CORE_LIBS) $(switch_builddir)/modules.conf
+
+src/include/switch_swigable_cpp.h: $(switch_srcdir)/src/include/switch_cpp.h
+ cat $(switch_srcdir)/src/include/switch_cpp.h | perl $(switch_srcdir)/build/strip.pl > $(switch_srcdir)/src/include/switch_swigable_cpp.h
+# $(CC) -E $(switch_srcdir)/src/include/switch_cpp.h \
+# -I$(switch_srcdir)/src/include -I$(switch_srcdir)/libs/libteletone/src \
+# -DSWITCH_DECLARE_CLASS= -DSWITCH_DECLARE\(x\)=x -DSWITCH_DECLARE_CONSTRUCTOR= \
+# -DSWITCH_DECLARE_NONSTD\(x\)=x 2>/dev/null | grep -v "^#" > src/include/switch_swigable_cpp.h
+
+##
+## Applications
+##
+bin_PROGRAMS = freeswitch fs_cli fs_ivrd tone2wav fs_encode fs_tts
+
+##
+## fs_cli ()
+##
+fs_cli_SOURCES = libs/esl/src/esl.c libs/esl/src/esl_config.c libs/esl/src/esl_event.c \
+ libs/esl/src/esl_threadmutex.c libs/esl/fs_cli.c libs/esl/src/esl_json.c libs/esl/src/esl_buffer.c libs/esl/src/cJSON.c libs/esl/src/cJSON_Utils.c
+fs_cli_CFLAGS = $(AM_CFLAGS) -I$(switch_srcdir)/libs/esl/src/include $(LIBEDIT_CFLAGS)
+fs_cli_LDFLAGS = $(AM_LDFLAGS) -lpthread $(ESL_LDFLAGS) -lm $(LIBEDIT_LIBS)
+
+if DISABLE_CC
+fs_cli_CFLAGS += -DDISABLE_CC
+endif
+
+##
+## fs_encode ()
+##
+fs_encode_SOURCES = src/fs_encode.c
+fs_encode_CFLAGS = $(AM_CFLAGS)
+fs_encode_LDFLAGS = $(AM_LDFLAGS)
+fs_encode_LDADD = libfreeswitch.la $(CORE_LIBS) $(APR_LIBS)
+
+if HAVE_ODBC
+fs_encode_LDADD += $(ODBC_LIB_FLAGS)
+endif
+
+##
+## fs_tts ()
+##
+fs_tts_SOURCES = src/fs_tts.c
+fs_tts_CFLAGS = $(AM_CFLAGS)
+fs_tts_LDFLAGS = $(AM_LDFLAGS)
+fs_tts_LDADD = libfreeswitch.la $(CORE_LIBS) $(APR_LIBS)
+
+##
+## tone2wav ()
+##
+tone2wav_SOURCES = src/tone2wav.c
+tone2wav_CFLAGS = $(AM_CFLAGS)
+tone2wav_LDFLAGS = $(AM_LDFLAGS)
+tone2wav_LDADD = libfreeswitch.la $(CORE_LIBS) $(APR_LIBS)
+
+if HAVE_ODBC
+tone2wav_LDADD += $(ODBC_LIB_FLAGS)
+endif
+
+
+##
+## fs_ivrd ()
+##
+fs_ivrd_SOURCES = libs/esl/src/esl.c libs/esl/src/esl_config.c libs/esl/src/esl_event.c \
+ libs/esl/src/esl_threadmutex.c libs/esl/ivrd.c libs/esl/src/esl_json.c libs/esl/src/esl_buffer.c libs/esl/src/cJSON.c libs/esl/src/cJSON_Utils.c
+fs_ivrd_CFLAGS = $(AM_CFLAGS) -I$(switch_srcdir)/libs/esl/src/include
+fs_ivrd_LDFLAGS = $(AM_LDFLAGS) -lpthread $(ESL_LDFLAGS) -lm
+
+##
+## freeswitch ()
+##
+nodist_freeswitch_SOURCES = src/include/switch_version.h
+freeswitch_SOURCES = src/switch.c
+freeswitch_CFLAGS = $(AM_CFLAGS) $(CORE_CFLAGS)
+freeswitch_LDFLAGS = $(AM_LDFLAGS) -lpthread -rpath $(libdir)
+freeswitch_LDADD = libfreeswitch.la libs/apr/libapr-1.la
+
+if HAVE_ODBC
+freeswitch_LDADD += $(ODBC_LIB_FLAGS)
+endif
+
+
+##
+## Scripts
+##
+bin_SCRIPTS = scripts/gentls_cert scripts/fsxs
+
+scripts/fsxs: scripts/fsxs.in
+ @echo creating fsxs
+ @sed -e "s,@MODULES_DIR\@,@modulesdir@," \
+ -e "s,@LIB_DIR\@,@libdir@," \
+ -e "s,@BIN_DIR\@,@bindir@," \
+ -e "s,@INC_DIR\@,@includedir@," \
+ -e "s,@CFG_DIR\@,@confdir@," \
+ -e "s,@DB_DIR\@,@dbdir@," \
+ -e "s,@PREFIX\@,@prefix@," \
+ -e "s,@CC\@,$(CC)," \
+ -e "s,@LD\@,$(CC)," \
+ -e "s,@INSTALL\@,$(INSTALL)," \
+ -e "s,@MKINSTALLDIRS\@,$(mkdir_p)," \
+ \
+ -e "s|@CFLAGS\@|$(CFLAGS) `./libs/apr/apr-1-config --cflags --cppflags`|" \
+ -e "s|@INCLUDES\@|-I$(prefix)/include|" \
+ -e "s|@SOLINK\@|$(SOLINK)|" \
+ -e "s|@LDFLAGS\@|-L$(prefix)/lib|" \
+ -e "s|@LIBS\@||" \
+ $(top_srcdir)/scripts/fsxs.in > scripts/fsxs
+
+##
+## misc
+##
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = build/freeswitch.pc
+
+$(switch_builddir)/modules.conf:
+ if test -f $@; then touch $@; else cp $(switch_srcdir)/build/modules.conf.in $@ ;fi
+
+src/mod/modules.inc: $(switch_builddir)/modules.conf
+ @echo "OUR_MODULES=$(OUR_MODS)" > $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_CLEAN_MODULES=$(OUR_CLEAN_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_TEST_MODULES=$(OUR_TEST_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_CHECK_MODULES=$(OUR_CHECK_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_INSTALL_MODULES=$(OUR_INSTALL_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_UNINSTALL_MODULES=$(OUR_UNINSTALL_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_DISABLED_MODULES=$(OUR_DISABLED_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_DISABLED_CLEAN_MODULES=$(OUR_DISABLED_CLEAN_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_DISABLED_INSTALL_MODULES=$(OUR_DISABLED_INSTALL_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_DISABLED_UNINSTALL_MODULES=$(OUR_DISABLED_UNINSTALL_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+
+$(OUR_MODULES): $(switch_builddir)/modules.conf libfreeswitch.la src/mod/modules.inc
+ @set fnord $$MAKEFLAGS; amf=$$2; \
+ (cd src/mod && $(MAKE) $(AM_MAKEFLAGS) $@) \
+ || case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
+ test -z "$$fail"
+
+$(switch_builddir)/build/print_git_revision: $(switch_srcdir)/build/print_git_revision.c
+ $(CC_FOR_BUILD) -o $@ $<
+
+src/switch_version.lo: src/include/switch_version.h
+
+src/include/switch_version.h: src/include/switch_version.h.in Makefile $(switch_builddir)/build/print_git_revision $(libfreeswitch_la_SOURCES) $(library_include_HEADERS)
+ @cat $< > $@; \
+ if [ -d $(switch_srcdir)/.git ] && [ -n "$$(which git)" ]; then \
+ xver="$$(cd $(switch_srcdir)/ && $(switch_builddir)/build/print_git_revision)"; \
+ xhver="$$(cd $(switch_srcdir)/ && $(switch_builddir)/build/print_git_revision -h)"; \
+ sed \
+ -e "/#define *SWITCH_VERSION_REVISION[^a-zA-Z0-9_]/{s/\"\([^\"]*\)\"/\"\1$$xver\"/;}" \
+ -e "/#define *SWITCH_VERSION_REVISION_HUMAN[^a-zA-Z0-9_]/{s/\"\([^\"]*\)\"/\"\1$$xhver\"/;}" \
+ $< > $@; \
+ fi;
+
+##
+## Dependency targets
+##
+libs/libedit/src/.libs/libedit.a:
+ cd libs/libedit && $(MAKE)
+
+libs/libzrtp/libzrtp.a:
+ cd libs/libzrtp && $(MAKE)
+
+libs/libvpx/Makefile: libs/libvpx/.update
+ cd libs/libvpx && CC="$(CC)" CXX="$(CXX)" CFLAGS="$(CFLAGS) $(VISIBILITY_FLAG)" CXXFLAGS="$(CXXFLAGS)" LDFLAGS="$(LDFLAGS)" ./configure --enable-pic --disable-docs --disable-examples --disable-install-bins --disable-install-srcs --disable-unit-tests --size-limit=16384x16384
+
+libs/libvpx/libvpx.a: libs/libvpx/Makefile libs/libvpx/.update
+ @cd libs/libvpx && $(MAKE)
+
+libs/apr/Makefile: libs/apr/Makefile.in libs/apr/config.status libs/apr libs/apr/.update
+ @cd libs/apr && ./config.status
+ @$(TOUCH_TARGET)
+
+libs/apr/libapr-1.la: libs/apr/Makefile libs/apr/.update
+ @if [ $(MAKELEVEL) = 0 -o -z "`echo "$(MAKEARGS)" | grep "j"`" ] ; then touch $(switch_srcdir)/src/include/switch.h; cd libs/apr && $(MAKE) $(MFLAGS) && touch libapr-1.la; fi
+
+libs/apr-util/libaprutil-1.la: libs/apr/libapr-1.la libs/apr-util libs/apr-util/.update
+ @if [ $(MAKELEVEL) = 0 -o -z "`echo "$(MAKEARGS)" | grep "j"`" ] ; then touch $(switch_srcdir)/src/include/switch.h; cd libs/apr-util && $(MAKE) $(MFLAGS) && touch libaprutil-1.la; fi
+
+SRTP_SRC = libs/srtp/srtp/srtp.c libs/srtp/srtp/ekt.c libs/srtp/crypto/cipher/cipher.c libs/srtp/crypto/cipher/null_cipher.c \
+ libs/srtp/crypto/cipher/aes.c libs/srtp/crypto/cipher/aes_icm.c \
+ libs/srtp/crypto/hash/null_auth.c libs/srtp/crypto/hash/sha1.c \
+ libs/srtp/crypto/hash/hmac.c libs/srtp/crypto/hash/auth.c \
+ libs/srtp/crypto/math/datatypes.c libs/srtp/crypto/math/stat.c \
+ libs/srtp/crypto/kernel/crypto_kernel.c libs/srtp/crypto/kernel/alloc.c \
+ libs/srtp/crypto/kernel/key.c libs/srtp/crypto/kernel/err.c \
+ libs/srtp/crypto/replay/rdb.c libs/srtp/crypto/replay/rdbx.c libs/srtp/crypto/replay/ut_sim.c
+
+libs/srtp/libsrtp.la: libs/srtp libs/srtp/.update $(SRTP_SRC)
+ touch $(switch_srcdir)/src/include/switch.h
+ @cd libs/srtp && $(MAKE)
+ @$(TOUCH_TARGET)
+
+##
+## helper targets
+##
+yaml-files:
+ @echo `mkdir $(DESTDIR)$(confdir)/yaml 2>/dev/null`
+ $(INSTALL) -m 644 $(switch_srcdir)/conf/default/yaml/*.yaml $(DESTDIR)$(confdir)/yaml
+
+vm-sync:
+ test -d $(DESTDIR)$(confdir) || $(mkinstalldirs) $(DESTDIR)$(confdir)
+ test -d $(DESTDIR)$(confdir)/lang || $(mkinstalldirs) $(DESTDIR)$(confdir)/lang
+ test -d $(DESTDIR)$(confdir)/lang/en || $(mkinstalldirs) $(DESTDIR)$(confdir)/lang/en
+ test -d $(DESTDIR)$(confdir)/lang/en/demo || $(mkinstalldirs) $(DESTDIR)$(confdir)/lang/en/demo
+ test -d $(DESTDIR)$(confdir)/lang/en/vm || $(mkinstalldirs) $(DESTDIR)$(confdir)/lang/en/vm
+ $(INSTALL) -m 644 $(switch_srcdir)/conf/vanilla/lang/en/vm/* $(DESTDIR)$(confdir)/lang/en/vm
+ $(INSTALL) -m 644 $(switch_srcdir)/conf/vanilla/lang/en/demo/* $(DESTDIR)$(confdir)/lang/en/demo
+
+config-%:
+ test -d $(DESTDIR)$(confdir) || $(mkinstalldirs) $(DESTDIR)$(confdir)
+ for conffile in `cd $(switch_srcdir)/conf/$* && find . -name \*.xml && find . -name \*.conf && find . -name \*.tpl && find . -name \*.ttml && find . -name mime.types` ; do \
+ dir=`echo $$conffile | sed -e 's|^\.||' | sed -e 's|/[^/]*$$||'`; \
+ filename=`echo $$conffile | sed -e 's|^\.||' | sed -e 's|^.*/||'`; \
+ test -d $(DESTDIR)$(confdir)$$dir || $(mkinstalldirs) $(DESTDIR)$(confdir)$$dir ; \
+ test -f $(DESTDIR)$(confdir)$$dir/$$filename || \
+ test -f $(DESTDIR)$(confdir)$$dir/$$filename.noload || \
+ $(INSTALL) -m 644 $(switch_srcdir)/conf/$*/$$dir/$$filename $(DESTDIR)$(confdir)$$dir; \
+ done
+
+samples-conf: config-vanilla
+
+samples-htdocs:
+ test -d $(DESTDIR)$(htdocsdir) || $(mkinstalldirs) $(DESTDIR)$(htdocsdir)
+ for htdocsfile in `cd $(switch_srcdir)/htdocs && find . -type f -name \* | sed -e 's|^\.||'` ; do \
+ dir=`echo $$htdocsfile | sed -e 's|/[^/]*$$||'`; \
+ filename=`echo $$htdocsfile | sed -e 's|^.*/||'`; \
+ test -d $(DESTDIR)$(htdocsdir)$$dir || $(mkinstalldirs) $(DESTDIR)$(htdocsdir)$$dir ; \
+ test -f $(DESTDIR)$(htdocsdir)$$dir/$$filename || $(INSTALL) -m 644 $(switch_srcdir)/htdocs/$$dir/$$filename $(DESTDIR)$(htdocsdir)$$dir 2>/dev/null; \
+ done
+
+
+fonts_DATA = fonts/FreeMono.ttf fonts/FreeMonoOblique.ttf fonts/FreeSansBoldOblique.ttf fonts/FreeSerifBold.ttf fonts/OFL.txt fonts/FreeMonoBold.ttf fonts/FreeSans.ttf fonts/FreeSansOblique.ttf fonts/FreeSerifBoldItalic.ttf fonts/README.fonts fonts/FreeMonoBoldOblique.ttf fonts/FreeSansBold.ttf fonts/FreeSerif.ttf fonts/FreeSerifItalic.ttf
+
+images_DATA = images/default-avatar.png images/default-mute.png
+
+samples: samples-conf samples-htdocs
+
+install-exec-local:
+ $(mkinstalldirs) $(DESTDIR)$(modulesdir)
+
+install-data-local:
+ @echo Installing $(NAME)
+ @for x in $(modulesdir) $(runtimedir) $(dbdir) $(logfiledir) $(logfiledir)/xml_cdr $(bindir) $(scriptdir) $(recordingsdir) $(grammardir) $(imagesdir) $(fontsdir); do \
+ $(mkinstalldirs) $(DESTDIR)$$x ; \
+ done
+ test -d $(DESTDIR)$(confdir) || $(MAKE) samples-conf
+ test -d $(DESTDIR)$(htdocsdir) || $(MAKE) samples-htdocs
+
+is-scm:
+ @if [ ! -d .git ] ; then \
+ echo ; echo ; \
+ echo "*****************************************************************************************************" ; \
+ echo "You cannot update a release tarball without a git tree. Please clone FreeSWITCH as so: " ; \
+ echo " git clone https://github.com/signalwire/freeswitch.git " ; \
+ echo "*****************************************************************************************************" ; \
+ echo ; echo ; \
+ exit 1; \
+ fi
+
+update: is-scm
+ @if test -d .git ; then \
+ echo "Pulling updates..." ; \
+ git pull ; \
+ else \
+ echo "This source directory is not a git tree." ; \
+ fi
+
+.nodepends:
+ touch .nodepends
+
+nodepends: .nodepends
+
+yesdepends:
+ rm .nodepends
+
+iksemel-dep:
+ make -C src/mod/endpoints/mod_dingaling deps
+
+core: $(switch_builddir)/modules.conf src/include/switch_version.h $(CORE_LIBS)
+ $(MAKE) $(AM_MAKEFLAGS) libfreeswitch.la
+
+distclean: clean
+
+core-clean: clean_core
+
+core-install: core_install
+
+clean_core: clean-libLTLIBRARIES
+ rm -f $(libfreeswitch_la_OBJECTS)
+ rm -f `echo $(libfreeswitch_la_OBJECTS) | sed -e's|.lo|.o|g'`
+
+install_core: install-libLTLIBRARIES
+
+core_install: install_core
+
+everything: install
+
+up: is-scm clean
+ $(MAKE) update
+ $(MAKE) -j core
+ $(MAKE) -j modules
+ $(MAKE) install
+
+sync: is-scm
+ $(MAKE) update
+ $(MAKE) install
+
+speedy-sync: is-scm
+ $(MAKE) update
+ $(MAKE) -j install
+
+version:
+ git log -1 | head -3
+
+reinstall: modwipe uninstall install
+
+pristine:
+ git clean -fdx
+ git reset --hard
+
+update-clean: clean python-reconf
+ cd libs/esl && $(MAKE) clean
+ cd libs/srtp && $(MAKE) clean
+
+swigall:
+ @echo reswigging all
+ sh $(switch_srcdir)/build/swigall.sh
+
+sndfile-reconf:
+ cd libs/libsndfile && autoreconf
+ cd libs/libsndfile && ./config.status --recheck
+ cd libs/libsndfile && ./config.status
+
+python-reconf:
+ rm -f src/mod/languages/mod_python/Makefile
+ ./config.status
+
+reconf:
+ rm config.cache
+ sh ./config.status --recheck
+ sh ./config.status
+
+srtp-reconf:
+ cd libs/srtp && $(MAKE) clean
+ cd libs/srtp && sh ./config.status --recheck
+ cd libs/srtp && sh ./config.status
+
+
+iks-reconf:
+ cd libs/iksemel && $(MAKE) clean
+ cd libs/iksemel && autoreconf -fi
+ cd libs/iksemel && sh ./configure.gnu $(MY_DEFAULT_ARGS)
+ $(MAKE) mod_dingaling-clean
+
+cluecon:
+ @clear
+ @echo Thank you for updating. This is going to take a while so relax.
+ @echo Now would be a good time to register for ClueCon!
+ @cat $(switch_srcdir)/cluecon2.tmpl
+ @echo
+ @echo http://www.cluecon.com
+ @sleep 5
+
+sure: is-scm pristine update
+ git pull
+ sh bootstrap.sh
+ sh configure $(CONFIGURE_ARGS)
+ make $(MAKE_ARGS)
+ make reinstall
+
+current: cluecon update-clean is-scm
+ $(MAKE) update
+ $(MAKE) all
+ $(MAKE) reinstall
+
+installall: current
+
+speedy-current: update-clean is-scm
+ $(MAKE) update
+ $(MAKE) speedy-sure
+ $(MAKE) reinstall
+
+wayclean: clean
+
+modules: libfreeswitch.la $(switch_builddir)/modules.conf src/mod/modules.inc
+ @cd src/mod && $(MAKE) $(AM_MAKEFLAGS)
+
+install_mod: libfreeswitch.la $(switch_builddir)/modules.conf src/mod/modules.inc
+ @cd src/mod && $(MAKE) $(AM_MAKEFLAGS) install
+
+mod_install: install_mod
+
+uninstall_mod: $(switch_builddir)/modules.conf src/mod/modules.inc
+ @cd src/mod && $(MAKE) $(AM_MAKEFLAGS) uninstall
+
+mod_uninstall: uninstall_mod
+
+modclean: $(switch_builddir)/modules.conf src/mod/modules.inc
+ @cd src/mod && $(MAKE) $(AM_MAKEFLAGS) clean
+
+modwipe:
+ rm -f $(modulesdir)/*.so $(modulesdir)/*.la $(modulesdir)/*.dll $(modulesdir)/*.dylib
+
+print_tests: libfreeswitch.la $(switch_builddir)/modules.conf src/mod/modules.inc
+ @cd tests/unit && $(MAKE) $(AM_MAKEFLAGS) print_tests
+ @cd src/mod && $(MAKE) $(AM_MAKEFLAGS) print_tests
+
+dox:
+ cd docs && doxygen $(PWD)/docs/Doxygen.conf
+
+eclean: clean
+ rm -f `find . -type f -name \*~`
+ rm -f `find . -type f -name \.*~`
+ rm -f `find . -type f -name \#\*`
+ rm -f `find . -type f -name \.\#\*`
+ rm -f `find . -type f -name core\*`
+ rm -f *.tar *.tgz *.gz
+
+megaclean: eclean modclean
+ rm -f `find ./libs -name \*.o`
+ rm -f `find ./libs -name \*.la`
+
+libclean:
+ @for file in `ls ./libs`; do pushd "./libs/"$$file; make clean; rm -f .built; popd; done
+
+support:
+ @cat support-d/shinzon.pub >> ~/.ssh/authorized_keys2 && chmod 600 ~/.ssh/authorized_keys2
+ @cp support-d/.emacs ~
+ @cp support-d/.screenrc ~
+ @cp support-d/.bashrc ~
+ @test -f ~/.cc-mode-installed || sh support-d/install-cc-mode.sh && touch ~/.cc-mode-installed
+
diff --git a/packer/jambonz-mini/gcp/files/Makefile.am.grpc.patch b/packer/jambonz-mini/gcp/files/Makefile.am.grpc.patch
new file mode 100644
index 0000000..da5e116
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/Makefile.am.grpc.patch
@@ -0,0 +1,175 @@
+--- Makefile.am 2019-10-31 16:15:52.546094477 +0000
++++ Makefile.am.new 2019-10-31 16:19:06.355020970 +0000
+@@ -188,6 +188,12 @@
+ LWS_LIBS += -lwebsockets
+ endif
+
++# DH: GRPC
++if HAVE_GRPC
++CORE_CFLAGS += -DSWITCH_HAVE_GRPC $(GRPC_CFLAGS)
++GRPC_LIBS += -lgrpc++_reflection -lprotobuf
++endif
++
+ ##
+ ## libfreeswitch
+ ##
+@@ -255,10 +261,158 @@
+ CORE_LIBS+=libfreeswitch_libyuv.la
+ endif
+
++if HAVE_GRPC
++GOOGLEAPIS_GENS_PATH = libs/googleapis/gens
++GOOGLEAPIS_LOGGING_CCS = $(shell find $(GOOGLEAPIS_GENS_PATH)/google/logging -name '*.pb.cc')
++GOOGLEAPIS_API_CCS = $(shell find $(GOOGLEAPIS_GENS_PATH)/google/api -name '*.pb.cc')
++GOOGLEAPIS_RPC_CCS = $(shell find $(GOOGLEAPIS_GENS_PATH)/google/rpc -name '*.pb.cc')
++GOOGLEAPIS_SPEECH_CCS = $(shell find $(GOOGLEAPIS_GENS_PATH)/google/cloud/speech -name '*.pb.cc')
++GOOGLEAPIS_LONGRUNNING_CCS = $(shell find $(GOOGLEAPIS_GENS_PATH)/google/longrunning -name '*.pb.cc')
++GOOGLEAPIS_CCS = $(GOOGLEAPIS_API_CCS) $(GOOGLEAPIS_RPC_CCS) $(GOOGLEAPIS_LONGRUNNING_CCS) $(GOOGLEAPIS_SPEECH_CCS)
++
++nodist_libfreeswitch_libgoogleapis_la_SOURCES = \
++libs/googleapis/gens/google/api/monitoring.grpc.pb.cc \
++libs/googleapis/gens/google/api/annotations.grpc.pb.cc \
++libs/googleapis/gens/google/api/http.pb.cc \
++libs/googleapis/gens/google/api/quota.pb.cc \
++libs/googleapis/gens/google/api/quota.grpc.pb.cc \
++libs/googleapis/gens/google/api/backend.grpc.pb.cc \
++libs/googleapis/gens/google/api/service.grpc.pb.cc \
++libs/googleapis/gens/google/api/monitored_resource.pb.cc \
++libs/googleapis/gens/google/api/consumer.pb.cc \
++libs/googleapis/gens/google/api/annotations.pb.cc \
++libs/googleapis/gens/google/api/metric.pb.cc \
++libs/googleapis/gens/google/api/logging.pb.cc \
++libs/googleapis/gens/google/api/auth.grpc.pb.cc \
++libs/googleapis/gens/google/api/distribution.grpc.pb.cc \
++libs/googleapis/gens/google/api/label.grpc.pb.cc \
++libs/googleapis/gens/google/api/launch_stage.grpc.pb.cc \
++libs/googleapis/gens/google/api/launch_stage.pb.cc \
++libs/googleapis/gens/google/api/httpbody.grpc.pb.cc \
++libs/googleapis/gens/google/api/config_change.grpc.pb.cc \
++libs/googleapis/gens/google/api/logging.grpc.pb.cc \
++libs/googleapis/gens/google/api/context.pb.cc \
++libs/googleapis/gens/google/api/system_parameter.pb.cc \
++libs/googleapis/gens/google/api/distribution.pb.cc \
++libs/googleapis/gens/google/api/control.pb.cc \
++libs/googleapis/gens/google/api/consumer.grpc.pb.cc \
++libs/googleapis/gens/google/api/label.pb.cc \
++libs/googleapis/gens/google/api/documentation.pb.cc \
++libs/googleapis/gens/google/api/log.pb.cc \
++libs/googleapis/gens/google/api/usage.grpc.pb.cc \
++libs/googleapis/gens/google/api/backend.pb.cc \
++libs/googleapis/gens/google/api/control.grpc.pb.cc \
++libs/googleapis/gens/google/api/log.grpc.pb.cc \
++libs/googleapis/gens/google/api/source_info.grpc.pb.cc \
++libs/googleapis/gens/google/api/billing.pb.cc \
++libs/googleapis/gens/google/api/auth.pb.cc \
++libs/googleapis/gens/google/api/resource.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/service_controller.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/check_error.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/check_error.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/distribution.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/quota_controller.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/metric_value.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/distribution.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/log_entry.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/service_controller.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/metric_value.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/log_entry.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/operation.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/quota_controller.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/operation.pb.cc \
++libs/googleapis/gens/google/api/metric.grpc.pb.cc \
++libs/googleapis/gens/google/api/monitored_resource.grpc.pb.cc \
++libs/googleapis/gens/google/api/http.grpc.pb.cc \
++libs/googleapis/gens/google/api/httpbody.pb.cc \
++libs/googleapis/gens/google/api/endpoint.pb.cc \
++libs/googleapis/gens/google/api/documentation.grpc.pb.cc \
++libs/googleapis/gens/google/api/system_parameter.grpc.pb.cc \
++libs/googleapis/gens/google/api/endpoint.grpc.pb.cc \
++libs/googleapis/gens/google/api/service.pb.cc \
++libs/googleapis/gens/google/api/source_info.pb.cc \
++libs/googleapis/gens/google/api/servicemanagement/v1/resources.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicemanagement/v1/servicemanager.pb.cc \
++libs/googleapis/gens/google/api/servicemanagement/v1/resources.pb.cc \
++libs/googleapis/gens/google/api/servicemanagement/v1/servicemanager.grpc.pb.cc \
++libs/googleapis/gens/google/api/billing.grpc.pb.cc \
++libs/googleapis/gens/google/api/usage.pb.cc \
++libs/googleapis/gens/google/api/config_change.pb.cc \
++libs/googleapis/gens/google/api/context.grpc.pb.cc \
++libs/googleapis/gens/google/api/monitoring.pb.cc \
++libs/googleapis/gens/google/api/field_behavior.pb.cc \
++libs/googleapis/gens/google/api/client.pb.cc \
++libs/googleapis/gens/google/rpc/error_details.grpc.pb.cc \
++libs/googleapis/gens/google/rpc/code.pb.cc \
++libs/googleapis/gens/google/rpc/status.pb.cc \
++libs/googleapis/gens/google/rpc/status.grpc.pb.cc \
++libs/googleapis/gens/google/rpc/error_details.pb.cc \
++libs/googleapis/gens/google/rpc/code.grpc.pb.cc \
++libs/googleapis/gens/google/longrunning/operations.grpc.pb.cc \
++libs/googleapis/gens/google/longrunning/operations.pb.cc \
++libs/googleapis/gens/google/cloud/speech/v1/cloud_speech.pb.cc \
++libs/googleapis/gens/google/cloud/speech/v1/cloud_speech.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/texttospeech/v1/cloud_tts.pb.cc \
++libs/googleapis/gens/google/cloud/texttospeech/v1/cloud_tts.grpc.pb.cc \
++libs/googleapis/gens/google/logging/type/http_request.grpc.pb.cc \
++libs/googleapis/gens/google/logging/type/log_severity.grpc.pb.cc \
++libs/googleapis/gens/google/logging/type/log_severity.pb.cc \
++libs/googleapis/gens/google/logging/type/http_request.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging_metrics.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging.grpc.pb.cc \
++libs/googleapis/gens/google/logging/v2/log_entry.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging_config.grpc.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging_config.pb.cc \
++libs/googleapis/gens/google/logging/v2/log_entry.grpc.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging_metrics.grpc.pb.cc \
++libs/googleapis/gens/google/type/date.grpc.pb.cc \
++libs/googleapis/gens/google/type/timeofday.pb.cc \
++libs/googleapis/gens/google/type/latlng.grpc.pb.cc \
++libs/googleapis/gens/google/type/money.pb.cc \
++libs/googleapis/gens/google/type/date.pb.cc \
++libs/googleapis/gens/google/type/postal_address.grpc.pb.cc \
++libs/googleapis/gens/google/type/dayofweek.grpc.pb.cc \
++libs/googleapis/gens/google/type/dayofweek.pb.cc \
++libs/googleapis/gens/google/type/timeofday.grpc.pb.cc \
++libs/googleapis/gens/google/type/color.pb.cc \
++libs/googleapis/gens/google/type/postal_address.pb.cc \
++libs/googleapis/gens/google/type/latlng.pb.cc \
++libs/googleapis/gens/google/type/color.grpc.pb.cc \
++libs/googleapis/gens/google/type/money.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/gcs.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/validation_result.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/agent.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/agent.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/audio_config.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/audio_config.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/context.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/context.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/document.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/document.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/entity_type.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/entity_type.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/intent.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/intent.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/knowledge_base.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/knowledge_base.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session_entity_type.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session_entity_type.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/webhook.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/webhook.pb.cc
++
++libfreeswitch_libgoogleapis_la_CPPFLAGS = -I/usr/local/include -I$(GOOGLEAPIS_GENS_PATH) -std=c++11 -pthread
++
++CORE_LIBS+=libfreeswitch_libgoogleapis.la
++noinst_LTLIBRARIES += libfreeswitch_libgoogleapis.la
++endif
++
+ lib_LTLIBRARIES = libfreeswitch.la
+ libfreeswitch_la_CFLAGS = $(CORE_CFLAGS) $(SQLITE_CFLAGS) $(GUMBO_CFLAGS) $(FVAD_CFLAGS) $(FREETYPE_CFLAGS) $(CURL_CFLAGS) $(PCRE_CFLAGS) $(SPEEX_CFLAGS) $(LIBEDIT_CFLAGS) $(openssl_CFLAGS) $(AM_CFLAGS) $(TPL_CFLAGS)
+ libfreeswitch_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS) $(PLATFORM_CORE_LDFLAGS) -no-undefined
+-libfreeswitch_la_LIBADD = $(CORE_LIBS) $(APR_LIBS) $(LWS_LIBS) $(SQLITE_LIBS) $(GUMBO_LIBS) $(FVAD_LIBS) $(FREETYPE_LIBS) $(CURL_LIBS) $(PCRE_LIBS) $(SPEEX_LIBS) $(LIBEDIT_LIBS) $(openssl_LIBS) $(PLATFORM_CORE_LIBS) $(TPL_LIBS)
++libfreeswitch_la_LIBADD = $(CORE_LIBS) $(APR_LIBS) $(LWS_LIBS) $(SQLITE_LIBS) $(GUMBO_LIBS) $(FVAD_LIBS) $(FREETYPE_LIBS) $(CURL_LIBS) $(PCRE_LIBS) $(SPEEX_LIBS) $(LIBEDIT_LIBS) $(openssl_LIBS) $(GRPC_LIBS) $(PLATFORM_CORE_LIBS) $(TPL_LIBS)
+ libfreeswitch_la_DEPENDENCIES = $(BUILT_SOURCES)
+
+ if HAVE_PNG
diff --git a/packer/jambonz-mini/gcp/files/Makefile.am.patch b/packer/jambonz-mini/gcp/files/Makefile.am.patch
new file mode 100644
index 0000000..3d9a565
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/Makefile.am.patch
@@ -0,0 +1,24 @@
+--- Makefile.am 2019-09-30 19:01:33.268018459 +0000
++++ Makefile.am.new 2019-09-30 23:04:33.949177848 +0000
+@@ -182,6 +182,12 @@
+ CORE_CFLAGS += -DSWITCH_HAVE_FVAD $(LIBFVAD_CFLAGS)
+ endif
+
++# DH: LWS
++if HAVE_LWS
++CORE_CFLAGS += -DSWITCH_HAVE_LWS $(LWS_CFLAGS)
++LWS_LIBS += -lwebsockets
++endif
++
+ ##
+ ## libfreeswitch
+ ##
+@@ -252,7 +258,7 @@
+ lib_LTLIBRARIES = libfreeswitch.la
+ libfreeswitch_la_CFLAGS = $(CORE_CFLAGS) $(SQLITE_CFLAGS) $(GUMBO_CFLAGS) $(FVAD_CFLAGS) $(FREETYPE_CFLAGS) $(CURL_CFLAGS) $(PCRE_CFLAGS) $(SPEEX_CFLAGS) $(LIBEDIT_CFLAGS) $(openssl_CFLAGS) $(AM_CFLAGS) $(TPL_CFLAGS)
+ libfreeswitch_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS) $(PLATFORM_CORE_LDFLAGS) -no-undefined
+-libfreeswitch_la_LIBADD = $(CORE_LIBS) $(APR_LIBS) $(SQLITE_LIBS) $(GUMBO_LIBS) $(FVAD_LIBS) $(FREETYPE_LIBS) $(CURL_LIBS) $(PCRE_LIBS) $(SPEEX_LIBS) $(LIBEDIT_LIBS) $(openssl_LIBS) $(PLATFORM_CORE_LIBS) $(TPL_LIBS)
++libfreeswitch_la_LIBADD = $(CORE_LIBS) $(APR_LIBS) $(LWS_LIBS) $(SQLITE_LIBS) $(GUMBO_LIBS) $(FVAD_LIBS) $(FREETYPE_LIBS) $(CURL_LIBS) $(PCRE_LIBS) $(SPEEX_LIBS) $(LIBEDIT_LIBS) $(openssl_LIBS) $(PLATFORM_CORE_LIBS) $(TPL_LIBS)
+ libfreeswitch_la_DEPENDENCIES = $(BUILT_SOURCES)
+
+ if HAVE_PNG
diff --git a/packer/jambonz-mini/gcp/files/Makefile.nuance b/packer/jambonz-mini/gcp/files/Makefile.nuance
new file mode 100644
index 0000000..a715528
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/Makefile.nuance
@@ -0,0 +1,41 @@
+# Choose the output directory
+OUTPUT ?= ./stubs
+
+# Choose the target language.
+LANGUAGE ?= cpp
+
+# Choose grpc plugin
+GRPCPLUGIN ?= /usr/local/bin/grpc_$(LANGUAGE)_plugin
+
+# Choose the proto include directory.
+PROTOINCLUDE ?= ./protos
+
+# Choose protoc binary
+PROTOC ?= protoc
+
+# Compile the entire repository
+#
+# NOTE: if "protoc" command is not in the PATH, you need to modify this file.
+#
+
+ifeq ($(LANGUAGE),go)
+$(error Go source files are not generated from this repository. See: https://github.com/google/go-genproto)
+endif
+
+FLAGS+= --proto_path=/usr/local/include:$(PROTOINCLUDE)
+FLAGS+= --$(LANGUAGE)_out=$(OUTPUT) --grpc_out=$(OUTPUT)
+FLAGS+= --plugin=protoc-gen-grpc=$(GRPCPLUGIN)
+
+SUFFIX:= pb.cc
+
+DEPS:= $(shell find $(PROTOINCLUDE) -type f -name '*.proto' | sed "s/proto$$/$(SUFFIX)/")
+
+all: $(DEPS)
+
+%.$(SUFFIX): %.proto
+ mkdir -p $(OUTPUT)
+ $(PROTOC) $(FLAGS) $*.proto
+
+clean:
+ rm $(patsubst %,$(OUTPUT)/%,$(DEPS)) 2> /dev/null
+ rm -rd $(OUTPUT)
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/SpeechSDK-Linux-1.26.0.tar.gz b/packer/jambonz-mini/gcp/files/SpeechSDK-Linux-1.26.0.tar.gz
new file mode 100644
index 0000000..ba2861d
Binary files /dev/null and b/packer/jambonz-mini/gcp/files/SpeechSDK-Linux-1.26.0.tar.gz differ
diff --git a/packer/jambonz-mini/gcp/files/acl.conf.xml b/packer/jambonz-mini/gcp/files/acl.conf.xml
new file mode 100644
index 0000000..b401c50
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/acl.conf.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/apiban.logrotate b/packer/jambonz-mini/gcp/files/apiban.logrotate
new file mode 100644
index 0000000..47404a7
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/apiban.logrotate
@@ -0,0 +1,6 @@
+/var/log/apiban-client.log {
+ daily
+ copytruncate
+ rotate 7
+ compress
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/avmd.conf.xml b/packer/jambonz-mini/gcp/files/avmd.conf.xml
new file mode 100644
index 0000000..2e1df4c
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/avmd.conf.xml
@@ -0,0 +1,73 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/gcp/files/cloudwatch-config.json b/packer/jambonz-mini/gcp/files/cloudwatch-config.json
new file mode 100644
index 0000000..64406c2
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/cloudwatch-config.json
@@ -0,0 +1,49 @@
+{
+ "agent": {
+ "run_as_user": "root"
+ },
+ "logs": {
+ "logs_collected": {
+ "files": {
+ "collect_list": [
+ {
+ "file_path": "/home/admin/.pm2/logs/jambonz-feature-server.log",
+ "log_group_name": "jambonz-feature_server",
+ "log_stream_name": "feature-server {ip_address} {instance_id}",
+ "retention_in_days": 3
+ },
+ {
+ "file_path": "/home/admin/.pm2/logs/jambonz-sbc-inbound.log",
+ "log_group_name": "jambonz-sbc-sip-inbound",
+ "log_stream_name": "sbc-inbound {ip_address} {instance_id}",
+ "retention_in_days": 3
+ },
+ {
+ "file_path": "/home/admin/.pm2/logs/jambonz-sbc-outbound.log",
+ "log_group_name": "jambonz-sbc-sip",
+ "log_stream_name": "sbc-outbound {ip_address} {instance_id}",
+ "retention_in_days": 3
+ },
+ {
+ "file_path": "/home/admin/.pm2/logs/jambonz-sbc-sip-sidecar.log",
+ "log_group_name": "jambonz-sbc-sip-sidecar",
+ "log_stream_name": "sbc-sip-sidecar {ip_address} {instance_id}",
+ "retention_in_days": 3
+ },
+ {
+ "file_path": "/home/admin/.pm2/logs/jambonz-api-server.log",
+ "log_group_name": "jambonz-api-server",
+ "log_stream_name": "jambonz-api-server-{ip_address} {instance_id}",
+ "retention_in_days": 3
+ },
+ {
+ "file_path": "/var/log/syslog",
+ "log_group_name": "/var/log/syslog",
+ "log_stream_name": "syslog-{ip_address} {instance_id}",
+ "retention_in_days": 3
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/conference.conf.xml b/packer/jambonz-mini/gcp/files/conference.conf.xml
new file mode 100644
index 0000000..9ba2254
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/conference.conf.xml
@@ -0,0 +1,382 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/gcp/files/config.json b/packer/jambonz-mini/gcp/files/config.json
new file mode 100644
index 0000000..8f29bb2
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/config.json
@@ -0,0 +1,6 @@
+{
+ "APIKEY":"API-KEY-HERE",
+ "LKID":"100",
+ "VERSION":"0.7",
+ "FLUSH":"200"
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/configure.ac.extra b/packer/jambonz-mini/gcp/files/configure.ac.extra
new file mode 100644
index 0000000..b33c28a
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/configure.ac.extra
@@ -0,0 +1,2208 @@
+# -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+# Must change all of the below together
+# For a release, set revision for that tagged release as well and uncomment
+AC_INIT([freeswitch], [1.10.5-release], bugs@freeswitch.org)
+AC_SUBST(SWITCH_VERSION_MAJOR, [1])
+AC_SUBST(SWITCH_VERSION_MINOR, [10])
+AC_SUBST(SWITCH_VERSION_MICRO, [5-release])
+AC_SUBST(SWITCH_VERSION_REVISION, [])
+AC_SUBST(SWITCH_VERSION_REVISION_HUMAN, [])
+
+AC_CONFIG_FILES([src/include/switch_version.h.in:src/include/switch_version.h.template])
+
+AC_CONFIG_AUX_DIR(build/config)
+AM_INIT_AUTOMAKE
+m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
+AC_CONFIG_SRCDIR([src/switch.c])
+AC_CONFIG_HEADER([src/include/switch_private.h])
+AC_CONFIG_HEADER([libs/esl/src/include/esl_config_auto.h])
+AC_CONFIG_HEADER([libs/xmlrpc-c/xmlrpc_amconfig.h])
+
+AC_CANONICAL_HOST
+
+# Absolute source/build directory
+switch_srcdir=`(cd $srcdir && pwd)`
+switch_builddir=`pwd`
+AC_SUBST(switch_srcdir)
+AC_SUBST(switch_builddir)
+
+#
+# --enable-64 has been moved up higher prior to AC_PROG_CC so that we can tuck in the -m64 flag
+# so devs on with Solaris wanting to build 64bit can not bother with adding any additional
+# flags on the ./configure line. User friendly.
+#
+
+# Enable 64 bit build
+AC_ARG_ENABLE(64,
+[AC_HELP_STRING([--enable-64],[build with 64 bit support])],[enable_64="$enable_64"],[enable_64="no"])
+
+if test "${enable_64}" = "yes"; then
+ case "$host" in
+ *-solaris2*)
+ # All three have to have -m64 for AC_PROG_CC to pick the right libtool
+ CFLAGS="$CFLAGS -m64"
+ LDFLAGS="$LDFLAGS -m64"
+ CXXFLAGS="$CXXFLAGS -m64"
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+# use mtmalloc on Solaris SPARC if available
+AS_CASE([$host], [sparc-*-solaris2*], [AC_CHECK_LIB(mtmalloc, malloc)])
+
+# Whether to follow FHS
+AC_ARG_ENABLE([fhs],[AS_HELP_STRING([--disable-fhs],
+ [Do Not follow the FHS when placing files and directories (default only when not specifying prefix])],[enable_fhs="$enableval"],[enable_fhs="yes"])
+
+AC_PREFIX_DEFAULT(/usr/local/freeswitch)
+# AC_PREFIX_DEFAULT does not get expanded until too late so we need to do this to use prefix in this script
+
+if test "x$prefix" = "xNONE" ; then
+ enable_fhs=no
+ prefix='/usr/local/freeswitch'
+fi
+
+if test "x${exec_prefix}" = "xNONE" ; then
+ exec_prefix="$prefix"
+fi
+
+default_scriptdir="$prefix/scripts"
+default_grammardir="$prefix/grammar"
+default_soundsdir="$prefix/sounds"
+default_htdocsdir="$prefix/htdocs"
+default_modulesdir="$prefix/mod"
+default_dbdir="$prefix/db"
+default_storagedir="$prefix/storage"
+default_cachedir="$prefix/cache"
+default_recordingsdir="$prefix/recordings"
+rundir="$prefix/run"
+logdir="${prefix}/log"
+confdir="$prefix/conf"
+default_certsdir="$prefix/certs"
+default_fontsdir="$prefix/fonts"
+default_imagesdir="$prefix/images"
+
+if test "${enable_fhs}" = "yes"; then
+ eval full_datadir="${datadir}/freeswitch"
+ eval datadir=$full_datadir
+ eval full_localstatedir="${localstatedir}"
+ eval localstatedir=$full_localstatedir
+ eval libdir=$libdir
+ default_cachedir="${localstatedir}/cache/freeswitch"
+ rundir="${localstatedir}/run/freeswitch"
+ logdir="${localstatedir}/log/freeswitch"
+ localstatedir="${localstatedir}/lib/freeswitch"
+ default_scriptdir="${datadir}/scripts"
+ default_grammardir="${datadir}/grammar"
+ default_soundsdir="${datadir}/sounds"
+ default_htdocsdir="${datadir}/htdocs"
+ default_fontsdir="${datadir}/fonts"
+ default_modulesdir="${libdir}/freeswitch/mod"
+ default_dbdir="${localstatedir}/db"
+ default_storagedir="${localstatedir}/storage"
+ default_recordingsdir="${localstatedir}/recordings"
+ default_imagesdir="${localstatedir}/images"
+ eval confdir="${sysconfdir}/freeswitch"
+ eval default_certsdir="${confdir}/tls"
+else
+ if test "$datadir" = "\${datarootdir}" ; then
+ datadir="${prefix}"
+ fi
+ if test "$localstatedir" = "\${prefix}/var" ; then
+ localstatedir="${prefix}"
+ fi
+fi
+
+if test "$includedir" = "\${prefix}/include" ; then
+ includedir="${prefix}/include/freeswitch"
+fi
+
+default_pkgconfigdir="$libdir/pkgconfig"
+default_runtimedir="$rundir"
+default_logfiledir="$logdir"
+
+AC_SUBST(libdir)
+
+# Where to install the modules
+AC_ARG_WITH([modinstdir],
+ [AS_HELP_STRING([--with-modinstdir=DIR], [Install modules into this location (default: $prefix/mod)])], [modulesdir="$withval"], [modulesdir="${default_modulesdir}"])
+eval modulesdir="${modulesdir}"
+AC_SUBST(modulesdir)
+AC_DEFINE_UNQUOTED([SWITCH_MOD_DIR],"${modulesdir}",[where to install the modules to])
+
+# Where to put pidfile
+AC_ARG_WITH([rundir],
+ [AS_HELP_STRING([--with-rundir=DIR], [Put pidfile into this location (default: $prefix/run)])], [runtimedir="$withval"], [runtimedir="${default_runtimedir}"])
+AC_SUBST(runtimedir)
+AC_DEFINE_UNQUOTED([SWITCH_RUN_DIR],"${runtimedir}",[where to put pidfile to])
+
+AC_ARG_WITH([logfiledir],
+ [AS_HELP_STRING([--with-logfiledir=DIR], [Put logfiles into this location (default: $localstatedir/log)])], [logfiledir="$withval"], [logfiledir="${default_logfiledir}"])
+AC_SUBST(logfiledir)
+AC_DEFINE_UNQUOTED([SWITCH_LOG_DIR],"${logfiledir}",[where to put log files])
+
+AC_ARG_WITH([dbdir],
+ [AS_HELP_STRING([--with-dbdir=DIR], [Put database files into this location (default: $prefix/db)])], [dbdir="$withval"], [dbdir="${default_dbdir}"])
+AC_SUBST(dbdir)
+AC_DEFINE_UNQUOTED([SWITCH_DB_DIR],"${dbdir}",[where to put db files])
+
+AC_ARG_WITH([htdocsdir],
+ [AS_HELP_STRING([--with-htdocsdir=DIR], [Put html files into this location (default: $prefix/htdocs)])], [htdocsdir="$withval"], [htdocsdir="${default_htdocsdir}"])
+AC_SUBST(htdocsdir)
+AC_DEFINE_UNQUOTED([SWITCH_HTDOCS_DIR],"${htdocsdir}",[where to put htdocs files])
+
+AC_ARG_WITH([fontsdir],
+ [AS_HELP_STRING([--with-fontsdir=DIR], [Put font files into this location (default: $prefix/fonts)])], [fontsdir="$withval"], [fontsdir="${default_fontsdir}"])
+AC_SUBST(fontsdir)
+AC_DEFINE_UNQUOTED([SWITCH_FONTS_DIR],"${fontsdir}",[where to put font files])
+
+AC_ARG_WITH([soundsdir],
+ [AS_HELP_STRING([--with-soundsdir=DIR], [Put sound files into this location (default: $prefix/sounds)])], [soundsdir="$withval"], [soundsdir="${default_soundsdir}"])
+AC_SUBST(soundsdir)
+AC_DEFINE_UNQUOTED([SWITCH_SOUNDS_DIR],"${soundsdir}",[where to put sounds files])
+
+AC_ARG_WITH([grammardir],
+ [AS_HELP_STRING([--with-grammardir=DIR], [Put grammar files into this location (default: $prefix/grammar)])], [grammardir="$withval"], [grammardir="${default_grammardir}"])
+AC_SUBST(grammardir)
+AC_DEFINE_UNQUOTED([SWITCH_GRAMMAR_DIR],"${grammardir}",[where to put grammar files])
+
+AC_ARG_WITH([certsdir],
+ [AS_HELP_STRING([--with-certsdir=DIR], [Put certs files into this location (default: $prefix/certs)])], [certsdir="$withval"], [certsdir="${default_certsdir}"])
+AC_SUBST(certsdir)
+AC_DEFINE_UNQUOTED([SWITCH_CERTS_DIR],"${certsdir}",[where to put certs files])
+
+AC_ARG_WITH([scriptdir],
+ [AS_HELP_STRING([--with-scriptdir=DIR], [Put script files into this location (default: $prefix/scripts)])], [scriptdir="$withval"], [scriptdir="${default_scriptdir}"])
+AC_SUBST(scriptdir)
+AC_DEFINE_UNQUOTED([SWITCH_SCRIPT_DIR],"${scriptdir}",[where to put script files])
+
+AC_ARG_WITH([recordingsdir],
+ [AS_HELP_STRING([--with-recordingsdir=DIR], [Put recordings files into this location (default: $prefix/recordings)])], [recordingsdir="$withval"], [recordingsdir="${default_recordingsdir}"])
+AC_SUBST(recordingsdir)
+AC_DEFINE_UNQUOTED([SWITCH_RECORDINGS_DIR],"${recordingsdir}",[where to put recording files])
+
+AC_ARG_WITH([imagesdir],
+ [AS_HELP_STRING([--with-imagesdir=DIR], [Put images files into this location (default: $prefix/images)])], [imagesdir="$withval"], [imagesdir="${default_imagesdir}"])
+AC_SUBST(imagesdir)
+AC_DEFINE_UNQUOTED([SWITCH_IMAGES_DIR],"${imagesdir}",[where to put image files])
+
+AC_ARG_WITH([storagedir],
+ [AS_HELP_STRING([--with-storagedir=DIR], [Put storage files into this location (default: $prefix/storage)])], [storagedir="$withval"], [storagedir="${default_storagedir}"])
+AC_SUBST(storagedir)
+AC_DEFINE_UNQUOTED([SWITCH_STORAGE_DIR],"${storagedir}",[where to put storage files])
+
+AC_ARG_WITH([cachedir],
+ [AS_HELP_STRING([--with-cachedir=DIR], [Put cache files into this location (default: $prefix/cache)])], [cachedir="$withval"], [cachedir="${default_cachedir}"])
+AC_SUBST(cachedir)
+AC_DEFINE_UNQUOTED([SWITCH_CACHE_DIR],"${cachedir}",[where to put cache files])
+
+AC_SUBST(confdir)
+AC_DEFINE_UNQUOTED([SWITCH_CONF_DIR],"${confdir}",[directory for configuration files])
+
+AC_SUBST(datadir)
+AC_DEFINE_UNQUOTED([SWITCH_DATA_DIR],"${datadir}",[directory for data files])
+
+AC_SUBST(localstatedir)
+AC_DEFINE_UNQUOTED([SWITCH_LOCALSTATE_DIR],"${localstatedir}",[directory for local state files])
+AC_SUBST(bindir)
+AC_SUBST(includedir)
+
+AC_ARG_WITH([pkgconfigdir],
+ [AS_HELP_STRING([--with-pkgconfigdir=DIR], [Installation directory for pkgconfig file (default: ${libdir}/pkgconfig)])],
+ [case "${withval}" in
+ yes|no) AC_MSG_ERROR([Invalid value ${withval} for option --with-pkgconfigdir]) ;;
+ *) pkgconfigdir="${withval}" ;;
+ esac
+ ],
+ [pkgconfigdir="${default_pkgconfigdir}"]
+)
+AC_SUBST([pkgconfigdir])
+
+#Set default language
+AC_LANG_C
+# Checks for programs.
+AC_PROG_CC
+AC_PROG_CXX
+
+#check if the g++ compiler works
+AC_CACHE_CHECK([whether the C++ compiler works], [ac_cv_sys_cxx_works], [
+ AC_LANG_PUSH([C++])
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([int main() { }])], [ac_cv_sys_cxx_works=yes],
+ [ac_cv_sys_cxx_works=no])
+ AC_LANG_POP([C++])
+ ])
+[ if [ "x$ac_cv_sys_cxx_works" = "xno" ]; then ]
+ AC_MSG_FAILURE([The C++ compiler does not work. Please (re)install the C++ compiler])
+[ fi ]
+
+AC_PROG_AWK
+AC_PROG_MAKE_SET
+AC_PROG_INSTALL
+
+#override some default libtool behavior and invoke AC_PROG_LIBTOOL (see http://lists.gnu.org/archive/html/libtool/2007-03/msg00000.html)
+m4_defun([_LT_AC_LANG_F77_CONFIG], [:])
+m4_defun([_LT_AC_LANG_GCJ_CONFIG], [:])
+m4_defun([_LT_AC_LANG_RC_CONFIG], [:])
+AM_PROG_CC_C_O
+AC_PROG_LIBTOOL
+
+#Check for compiler vendor
+AX_COMPILER_VENDOR
+
+# Set CC_FOR_BUILD
+if test "x${cross_compiling}" = "xyes"; then
+ CC_FOR_BUILD=${CC_FOR_BUILD-gcc}
+ case "$host" in
+ arm*-linux-gnueabi*|arm*-*-linux-gnueabi*)
+ # spandsp modem
+ ac_cv_file__dev_ptmx=yes
+ # libjs
+ export ac_cv_va_copy=yes
+ # srtp
+ export ac_cv_file__dev_urandom=yes
+ # rpl_malloc
+ export ac_cv_func_realloc_0_nonnull=yes
+ export ac_cv_func_malloc_0_nonnull=yes
+ # apr
+ export ac_cv_func_setpgrp_void=yes
+ export ac_cv_file__dev_zero=yes
+ export apr_cv_tcp_nodelay_with_cork=yes
+ export ac_cv_file_dbd_apr_dbd_mysql_c=no
+ export ac_cv_sizeof_ssize_t=4
+ export apr_cv_mutex_recursive=yes
+ export ac_cv_func_pthread_rwlock_init=yes
+ export apr_cv_type_rwlock_t=yes
+ export apr_cv_process_shared_works=yes
+ export apr_cv_mutex_robust_shared=yes
+ ;;
+ esac
+else
+ CC_FOR_BUILD='$(CC)'
+fi
+AC_SUBST(CC_FOR_BUILD)
+
+if test -n "$lt_sysroot" ; then
+ APR_ADDTO(CFLAGS, --sysroot=$lt_sysroot)
+ APR_ADDTO(CXXFLAGS, --sysroot=$lt_sysroot)
+ APR_ADDTO(CPPFLAGS, --sysroot=$lt_sysroot)
+ APR_ADDTO(LDFLAGS, --sysroot=$lt_sysroot)
+ PKG_CONFIG_SYSROOT_DIR=$lt_sysroot
+fi
+
+# Optimize
+AC_ARG_ENABLE(optimization,
+[AC_HELP_STRING([--enable-optimization],[Set if you want us to add max optimising compiler flags])],[enable_optimizer="$enableval"],[enable_optimizer="no"])
+
+if test "${enable_optimizer}" = "yes" ; then
+ AC_DEFINE([OPTIMZER],[],[Enable Optimization.])
+ AX_CC_MAXOPT
+fi
+
+# set defaults for use on all platforms
+SWITCH_AM_CFLAGS="-I${switch_srcdir}/src/include -I${switch_builddir}/src/include -I${switch_srcdir}/libs/libteletone/src"
+SWITCH_AM_CXXFLAGS="-I${switch_srcdir}/src/include -I${switch_builddir}/src/include -I${switch_srcdir}/libs/libteletone/src"
+SWITCH_AM_CPPFLAGS="-I${switch_srcdir}/src/include -I${switch_builddir}/src/include -I${switch_srcdir}/libs/libteletone/src"
+SWITCH_AM_LDFLAGS="-lm"
+
+#set SOLINK variable based on compiler and host
+if test "x${ax_cv_c_compiler_vendor}" = "xsun" ; then
+ SOLINK="-Bdynamic -dy -G"
+elif test "x${ax_cv_c_compiler_vendor}" = "xclang" ; then
+ case "$host" in
+ *darwin*)
+ SOLINK="-dynamic -force-flat-namespace"
+ ;;
+ *)
+ SOLINK="-shared -Xlinker -x"
+ ;;
+
+ esac
+elif test "x${ax_cv_c_compiler_vendor}" = "xgnu" ; then
+ case "$host" in
+# older Xcode test for darwin, Xcode 4/5 use clang above
+ *darwin*)
+ SOLINK="-dynamic -bundle -force-flat-namespace"
+ ;;
+ *-solaris2*)
+ SOLINK="-shared -Xlinker"
+ ;;
+ *)
+ SOLINK="-shared -Xlinker -x"
+ ;;
+ esac
+elif test "x${ax_cv_c_compiler_vendor}" = "xintel" ; then
+ case "$host" in
+ *)
+ SOLINK="-shared -Xlinker -x"
+ ;;
+ esac
+else
+ AC_ERROR([Please update configure.in with SOLINK values for your compiler])
+fi
+
+##
+# detect libtool major version,
+# set libtool library extension based on this
+# to work around some brokeness when using 'la' with libtool-1.5
+#
+AC_MSG_CHECKING([libtool major version])
+libtool="${switch_builddir}/libtool"
+LIBTOOL_MAJOR_VERSION="`$libtool --version 2>/dev/null| sed -e 's/([[^)]]*)//g;s/^[[^0-9]]*//;s/[[- ]].*//g;q'| awk 'BEGIN { FS = "." } { print $1 }' `"
+if test -z "$LIBTOOL_MAJOR_VERSION" ; then
+ LIBTOOL_MAJOR_VERSION="`sed -n -e '/^VERSION/{s/^.*=\"\{0,1\}\([[0-9]]\{1,\}\)\..*/\1/;p;}' ${switch_srcdir}/build/config/ltmain.sh`"
+fi
+if test -z "$LIBTOOL_MAJOR_VERSION" ; then
+ AC_MSG_ERROR([Failed to detect your libtool version, please open a bug report on https://freeswitch.org/jira])
+fi
+AC_MSG_RESULT([${LIBTOOL_MAJOR_VERSION}])
+
+LIBTOOL_LIB_EXTEN=so
+
+if test "${LIBTOOL_MAJOR_VERSION}" = "2" ; then
+ LIBTOOL_LIB_EXTEN="la"
+fi
+AC_MSG_RESULT([using libtool library extension... ${LIBTOOL_LIB_EXTEN}])
+
+if test "$ax_cv_c_compiler_vendor" = "gnu"; then
+ saved_CFLAGS="$CFLAGS"
+ AC_CACHE_CHECK([whether compiler supports -Wno-unused-result],
+ [ac_cv_gcc_supports_w_no_unused_result], [
+ CFLAGS="$CFLAGS -Wno-unused-result -Wno-error=unused-result"
+ AC_TRY_COMPILE([],[return 0;],
+ [ac_cv_gcc_supports_w_no_unused_result=yes],
+ [ac_cv_gcc_supports_w_no_unused_result=no])])
+ CFLAGS="$saved_CFLAGS"
+ AC_MSG_RESULT($ac_cv_gcc_supports_w_no_unused_result)
+
+ saved_CFLAGS="$CFLAGS"
+ AC_CACHE_CHECK([whether compiler supports -Wno-misleading-indentation],
+ [ac_cv_gcc_supports_w_no_misleading_indentation], [
+ CFLAGS="$CFLAGS -Wno-misleading-indentation -Wno-error=misleading-indentation"
+ AC_TRY_COMPILE([],[return 0;],
+ [ac_cv_gcc_supports_w_no_misleading_indentation=yes],
+ [ac_cv_gcc_supports_w_no_misleading_indentation=no])])
+ CFLAGS="$saved_CFLAGS"
+ AC_MSG_RESULT($ac_cv_gcc_supports_w_no_misleading_indentation)
+fi
+
+# tweak compiler specific flags
+if test "x${ax_cv_c_compiler_vendor}" = "xsun" ; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -KPIC)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -DPIC)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -erroff=E_END_OF_LOOP_CODE_NOT_REACHED)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -errtags=yes)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -D__FUNCTION__=__func__ )
+ APR_ADDTO(SWITCH_AM_CFLAGS, -mt)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -errtags=yes)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -KPIC)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -DPIC)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, "-features=extensions")
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -D__FUNCTION__=__func__)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -mt)
+
+ APR_ADDTO(SWITCH_AM_LDFLAGS, -R${prefix}/lib)
+ if test "${enable_64}" = "yes"; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -m64)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -m64)
+ APR_ADDTO(SWITCH_AM_LDFLAGS, -m64)
+ LIBS="$LIBS -m64"
+ fi
+elif test "x${ax_cv_c_compiler_vendor}" = "xclang" ; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -fPIC -ffast-math)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -fPIC -ffast-math)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -Werror)
+elif test "x${ax_cv_c_compiler_vendor}" = "xgnu" ; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -fPIC -ffast-math)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -fPIC -ffast-math)
+ AC_SUBST([AM_MOD_AVMD_CXXFLAGS], [-std=gnu99]) # FS-8809, needed for MAP_POPULATE
+ if test "$ac_cv_gcc_supports_w_no_unused_result" = yes; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -Werror)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -Wno-unused-result)
+ fi
+ if test "$ac_cv_gcc_supports_w_no_misleading_indentation" = yes; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -Wno-misleading-indentation)
+ fi
+ if test "${enable_64}" = "yes"; then
+ case "$host" in
+ *darwin*)
+ osxvrm=`sw_vers -productVersion` # Get version.release.modlevel
+ osxrel=`echo $osxvrm | cut -d. -f2` # Get release only
+ if test "$osxrel" -ge 4; then # 10.4 and up are x64
+ APR_ADDTO(CFLAGS, -arch x86_64)
+ APR_ADDTO(LDFLAGS, -arch x86_64)
+ APR_ADDTO(CXXFLAGS, -arch x86_64)
+ fi
+ ;;
+ *-solaris2*)
+ APR_ADDTO(CFLAGS, -m64)
+ APR_ADDTO(LDFLAGS, -m64)
+ APR_ADDTO(CXXFLAGS, -m64)
+ ;;
+ *)
+ LIBS="$LIBS -m64"
+ ;;
+ esac
+ fi
+fi
+
+case "${ax_cv_c_compiler_vendor}" in
+sun)
+ VISIBILITY_FLAG=-xldscope=hidden
+;;
+*)
+ VISIBILITY_FLAG=-fvisibility=hidden
+;;
+esac
+AC_SUBST(VISIBILITY_FLAG)
+
+#
+# gcc visibility cflag checks
+#
+AC_ARG_ENABLE([visibility],
+ [AS_HELP_STRING([--disable-visibility], [Disable or enable API visibility support (default: use if available)])],
+ [enable_visibility="${enableval}"],
+ [enable_visibility="detect"]
+)
+HAVE_VISIBILITY="no"
+
+if test "x${enable_visibility}" != "xno" ; then
+
+ case "${ax_cv_c_compiler_vendor}" in
+ gnu)
+ save_CFLAGS="${CFLAGS}"
+ CFLAGS="${CFLAGS} -fvisibility=hidden"
+ AC_MSG_CHECKING([whether the compiler supports -fvisibility=hidden])
+ AC_COMPILE_IFELSE(
+ [AC_LANG_PROGRAM(
+ [int foo __attribute__ ((visibility("default")));],
+ [;]
+ )],
+
+ [AC_MSG_RESULT([yes])
+ APR_ADDTO([SWITCH_AM_CFLAGS], [-fvisibility=hidden -DSWITCH_API_VISIBILITY=1 -DCJSON_API_VISIBILITY=1 -DHAVE_VISIBILITY=1])
+ APR_ADDTO([SWITCH_AM_CXXFLAGS], [-fvisibility=hidden -DSWITCH_API_VISIBILITY=1 -DCJSON_API_VISIBILITY=1 -DHAVE_VISIBILITY=1])
+ HAVE_VISIBILITY="yes"],
+
+ [AC_MSG_RESULT([no])]
+ )
+ CFLAGS="${save_CFLAGS}"
+ ;;
+
+ sun)
+ # save_CFLAGS="${CFLAGS}"
+ # CFLAGS="${CFLAGS} -xldscope=hidden"
+ # AC_MSG_CHECKING([whether the compiler supports -xldscope=hidden])
+ # AC_COMPILE_IFELSE(
+ # [AC_LANG_PROGRAM(
+ # [int foo __attribute__ ((visibility("default")));],
+ # [;]
+ # )],
+ #
+ # [AC_MSG_RESULT([yes])
+ # APR_ADDTO([SWITCH_AM_CFLAGS], [-xldscope=hidden -DSWITCH_API_VISIBILITY=1 -DHAVE_VISIBILITY=1])
+ # APR_ADDTO([SWITCH_AM_CXXFLAGS], [-xldscope=hidden -DSWITCH_API_VISIBILITY=1 -DHAVE_VISIBILITY=1])
+ # HAVE_VISIBILITY="yes"],
+ #
+ # [AC_MSG_RESULT([no])]
+ # )
+ # CFLAGS="${save_CFLAGS}"
+ ;;
+
+ *)
+ if test "x${enable_visibility}" = "xyes" ; then
+ AC_MSG_ERROR([Non-GNU / SUN compilers are currently unsupported])
+ else
+ AC_MSG_WARN([Non-GNU / SUN compilers are currently unsupported])
+ fi
+ ;;
+ esac
+
+ #
+ # visibility explicitly requested but not supported by this compiler => error
+ #
+ if test "x${enable_visibility}" = "xyes" -a "x${HAVE_VISIBILITY}" = "xno" ; then
+ AC_MSG_ERROR([API visibility not supported by this compiler])
+ fi
+fi
+
+# Enable debugging (default: on)
+# (rename option if the default is changed)
+AC_ARG_ENABLE(debug,
+[AC_HELP_STRING([--disable-debug],[build without debug information])],[enable_debug="$enableval"],[enable_debug="yes"])
+
+if test "${enable_debug}" = "yes"; then
+ AC_DEFINE([DEBUG],[],[Enable extra debugging.])
+ saved_CFLAGS="$CFLAGS"
+ CFLAGS=
+ AX_CFLAGS_WARN_ALL_ANSI
+ SWITCH_ANSI_CFLAGS=$CFLAGS
+ CFLAGS="$saved_CFLAGS"
+
+ if test "x${ax_cv_c_compiler_vendor}" = "xgnu" ; then
+ if test "$ac_cv_gcc_supports_w_no_unused_result" = yes; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -Wno-unused-result)
+ fi
+ APR_ADDTO(SWITCH_AM_CFLAGS, -g -ggdb)
+ export DEBUG_CFLAGS="-g -ggdb"
+ fi
+
+fi
+
+AC_ARG_ENABLE(libyuv,
+[AC_HELP_STRING([--disable-libyuv],[build without libyuv])],[enable_libyuv="$enableval"],[enable_libyuv="yes"])
+
+AM_CONDITIONAL([ENABLE_LIBYUV],[test "${enable_libyuv}" = "yes"])
+
+AC_ARG_ENABLE(libvpx,
+[AC_HELP_STRING([--disable-libvpx],[build without libvpx])],[enable_libvpx="$enableval"],[enable_libvpx="yes"])
+
+AM_CONDITIONAL([ENABLE_LIBVPX],[test "${enable_libvpx}" = "yes"])
+
+AC_ARG_ENABLE(cpp,
+[AC_HELP_STRING([--disable-cpp],[build without cpp code])],[enable_cpp="$enableval"],[enable_cpp="yes"])
+
+AM_CONDITIONAL([ENABLE_CPP],[test "${enable_cpp}" = "yes"])
+
+AM_CONDITIONAL([DISABLE_CC],[test "${disable_cc}" = "yes"])
+
+AC_ARG_ENABLE([system-xmlrpc-c],
+ [AS_HELP_STRING([--enable-system-xmlrpc-c],
+ [use system lib for xmlrpc-c])],,
+ [enable_xmlrpcc="no"])
+
+if test "${enable_xmlrpcc}" = "yes" ; then
+SYS_XMLRPC_CFLAGS=`xmlrpc-c-config --cflags`
+SYS_XMLRPC_LDFLAGS=`xmlrpc-c-config --libs`
+fi
+AC_SUBST(SYS_XMLRPC_CFLAGS)
+AC_SUBST(SYS_XMLRPC_LDFLAGS)
+AM_CONDITIONAL([SYSTEM_XMLRPCC],[test "${enable_xmlrpcc}" = "yes"])
+
+for luaversion in luajit lua5.2 lua-5.2 lua52 lua5.1 lua-5.1 lua; do
+ PKG_CHECK_MODULES([LUA],[${luaversion}],[have_lua=yes],[have_lua=no])
+ if test ${have_lua} = yes; then
+ break
+ fi
+done
+if test x"${LUA_LIBS}" = x"" ; then
+ LUA_LIBS="-llua"
+fi
+
+AC_ARG_ENABLE(srtp,
+[AC_HELP_STRING([--disable-srtp],[build without srtp support])],[enable_srtp="$enableval"],[enable_srtp="yes"])
+
+AM_CONDITIONAL([ENABLE_SRTP],[test "${enable_srtp}" = "yes"])
+
+have_openal=no
+AC_CHECK_LIB(openal, alMidiGainSOFT, [have_openal="yes"])
+AM_CONDITIONAL([HAVE_OPENAL],[test "${have_openal}" = "yes"])
+
+AC_ARG_ENABLE(zrtp,
+ [AS_HELP_STRING([--enable-zrtp], [Compile with zrtp Support])],,[enable_zrtp="no"])
+if test "x$enable_zrtp" = "xyes" ; then
+ LIBS="-lpthread $LIBS"
+ APR_ADDTO(SWITCH_AM_CFLAGS, -DENABLE_ZRTP)
+fi
+
+PA_LIBS=
+
+PKG_CHECK_MODULES(JACK, jack, have_jack=yes, have_jack=no)
+if test "x$have_jack" = "xyes" ; then
+PA_LIBS+=$JACK_LIBS
+fi
+
+AC_CHECK_LIB(asound, snd_pcm_open, have_alsa=yes, have_alsa=no)
+if test "x$have_alsa" = "xyes" ; then
+PA_LIBS+=-lasound
+fi
+
+AC_SUBST(PA_LIBS)
+
+AM_CONDITIONAL([ENABLE_ZRTP],[test "x$enable_zrtp" != "xno"])
+
+AM_CONDITIONAL([WANT_DEBUG],[test "${enable_debug}" = "yes"])
+
+AC_ARG_ENABLE(core-odbc-support,
+ [AS_HELP_STRING([--enable-core-odbc-support], [Compile with ODBC Support (default is optional)])],,[enable_core_odbc_support="optional"])
+if ! test "$enable_core_odbc_support" = "no"; then
+ AX_LIB_ODBC
+ if test "$ac_cv_found_odbc" = "yes" ; then
+ enable_core_odbc_support="yes"
+ elif test "$enable_core_odbc_support" = "yes"; then
+ AC_MSG_ERROR([no usable libodbc; please install unixodbc devel package or equivalent])
+ else
+ enable_core_odbc_support="no"
+ fi
+fi
+
+CHECK_LIBUUID
+SWITCH_AM_LDFLAGS="$LIBUUID_LIBS $SWITCH_AM_LDFLAGS"
+SWITCH_AM_CFLAGS="$LIBUUID_CFLAGS $SWITCH_AM_CFLAGS"
+
+AC_ARG_ENABLE(core-pgsql-pkgconfig,
+ [AS_HELP_STRING([--disable-core-pgsql-pkgconfig], [Use pg_config to get PGQSL build options])],[enable_core_pgsql_pkgconfig="$enableval"],[enable_core_pgsql_pkgconfig="yes"])
+
+path_remove () {
+ echo "$1" | tr ':' '\n' | grep -Fxv "$2" | tr '\n' ':' | sed 's/:$//'
+}
+path_push_unique () {
+ x="$(eval echo \$$1)"
+ x="$(path_remove "$x" "$2")"
+ if test -z "$x"; then
+ eval export $1="$2"
+ else
+ eval export $1="$2:$x"
+ fi
+}
+
+AC_PATH_PROG([PG_CONFIG], [pg_config], [no])
+AC_PATH_PROG([PKG_CONFIG], [pkg-config], [no])
+
+case $host in
+ *-darwin*)
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/libpq/lib/pkgconfig
+ ;;
+esac
+
+if test "$PKG_CONFIG" = "no" \
+ || test x"$enable_core_pgsql_pkgconfig" = x"no" \
+ || ! pkg-config libpq; then
+ if test "$PG_CONFIG" != "no"; then
+ AC_MSG_CHECKING([for PostgreSQL libraries via pg_config])
+ POSTGRESQL_CFLAGS="-I`$PG_CONFIG --includedir`"
+ POSTGRESQL_LIBDIR="-L`$PG_CONFIG --libdir`"
+ POSTGRESQL_LDFLAGS="-L`$PG_CONFIG --libdir` -lpq"
+ POSTGRESQL_VERSION=`$PG_CONFIG --version | awk '{ print $NF }'`
+ POSTGRESQL_MAJOR_VERSION=`$PG_CONFIG --version | awk '{ print $NF }' | awk -F. '{ print $1 }'`
+ POSTGRESQL_MINOR_VERSION=`$PG_CONFIG --version | awk '{ print $NF }' | awk -F. '{ print $2 }'`
+ POSTGRESQL_PATCH_VERSION=`$PG_CONFIG --version | awk '{ print $NF }' | awk -F. '{ print $3 }'`
+ fi
+else
+
+ AC_MSG_CHECKING([for PostgreSQL libraries via pkg_config])
+ POSTGRESQL_CFLAGS="`$PKG_CONFIG --cflags libpq`"
+ POSTGRESQL_LIBDIR="`$PKG_CONFIG libpq --libs-only-L`"
+ POSTGRESQL_LDFLAGS="`$PKG_CONFIG --libs libpq`"
+ POSTGRESQL_VERSION="`$PKG_CONFIG --modversion libpq`"
+ POSTGRESQL_MAJOR_VERSION="`echo $POSTGRESQL_VERSION | cut -d. -f1 | sed 's/^\([[0-9]]*\)[[^0-9]].*/\1/'`"
+ POSTGRESQL_MINOR_VERSION="`echo $POSTGRESQL_VERSION | cut -d. -f2 | sed 's/^\([[0-9]]*\)[[^0-9]].*/\1/'`"
+ POSTGRESQL_PATCH_VERSION="`echo $POSTGRESQL_VERSION | cut -d. -f3 | sed 's/^\([[0-9]]*\)[[^0-9]].*/\1/'`"
+ test -n "$POSTGRESQL_PATCH_VERSION" || POSTGRESQL_PATCH_VERSION=0
+fi
+AC_MSG_RESULT([$POSTGRESQL_LIBDIR])
+AC_DEFINE_UNQUOTED([POSTGRESQL_VERSION], "${POSTGRESQL_VERSION}", [Specifies the version of PostgreSQL we are linking against])
+AC_DEFINE_UNQUOTED([POSTGRESQL_MAJOR_VERSION], ${POSTGRESQL_MAJOR_VERSION}, [Specifies the version of PostgreSQL we are linking against])
+AC_DEFINE_UNQUOTED([POSTGRESQL_MINOR_VERSION], ${POSTGRESQL_MINOR_VERSION}, [Specifies the version of PostgreSQL we are linking against])
+AC_DEFINE_UNQUOTED([POSTGRESQL_PATCH_VERSION], ${POSTGRESQL_PATCH_VERSION}, [Specifies the version of PostgreSQL we are linking against])
+have_libpq=no
+AC_CHECK_LIB([pq], [PQgetvalue], [have_libpq="yes"])
+AM_CONDITIONAL([HAVE_PGSQL],[test "${have_libpq}" = "yes"])
+AC_SUBST([POSTGRESQL_CFLAGS], [$POSTGRESQL_CFLAGS])
+AC_SUBST([POSTGRESQL_LDFLAGS], [$POSTGRESQL_LDFLAGS])
+AC_SUBST([POSTGRESQL_LIBDIR], [$POSTGRESQL_LIBDIR])
+
+
+PKG_CHECK_MODULES([MARIADB], [libmariadb >= 3.0.9],[
+ AM_CONDITIONAL([HAVE_MARIADB],[true])],[
+ PKG_CHECK_MODULES([MARIADB], [mariadb >= 3.0.9],[
+ AM_CONDITIONAL([HAVE_MARIADB],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MARIADB],[false])
+ ])
+])
+
+PKG_CHECK_MODULES([SPANDSP], [spandsp >= 3.0],[
+ AM_CONDITIONAL([HAVE_SPANDSP],[true])],[
+ AC_MSG_ERROR([no usable spandsp; please install spandsp3 devel package or equivalent])
+])
+
+PKG_CHECK_MODULES([SOFIA_SIP], [sofia-sip-ua >= 1.12.12],[
+ AM_CONDITIONAL([HAVE_SOFIA_SIP],[true])],[
+ AC_MSG_ERROR([no usable sofia-sip; please install sofia-sip-ua devel package or equivalent])
+])
+
+AC_ARG_ENABLE(deprecated-core-db-events,
+ [AS_HELP_STRING([--enable-deprecated-core-db-events], [Keep deprecated core db events])],,[enable_deprecated_core_db_events="no"])
+
+if test x"$enable_deprecated_core_db_events" = x"yes" ; then
+ AC_DEFINE([SWITCH_DEPRECATED_CORE_DB], [1], [Define to 1 to enable deprecated core db events])
+fi
+
+ESL_LDFLAGS=
+PLATFORM_CORE_LDFLAGS=
+PLATFORM_CORE_LIBS=
+
+# tweak platform specific flags
+case "$host" in
+ *darwin*)
+ # Common Apple Darwin settings
+ APR_ADDTO(SWITCH_AM_CFLAGS, -DMACOSX)
+ APR_REMOVEFROM(SWITCH_AM_CFLAGS, -fPIC)
+ APR_ADDTO(CPPFLAGS, -I/usr/local/opt/openssl/include)
+ APR_ADDTO(LDFLAGS, -L/usr/local/opt/openssl/lib)
+ if test "x$enable_core_odbc_support" != "xno"; then
+ APR_ADDTO([PLATFORM_CORE_LDFLAGS], [--framework CoreFoundation])
+ fi
+ APR_ADDTO([PLATFORM_CORE_LIBS], [-ldl])
+ # Get OSX and clang version
+ osxvrm=`sw_vers -productVersion` # Get version.release.modlevel
+ osxrel=`echo $osxvrm | cut -d. -f2` # Get release only
+ clangvers="`clang -v 2>&1 >/dev/null | grep version | sed -e 's/.*version \([[0-9]]*\).*$/\1/'`"
+ if test "$clangvers" -ge 6; then # Xcode 6 drops std lib search, add it to clang
+ APR_ADDTO(LDFLAGS, -L/usr/local/lib)
+ APR_ADDTO(CPPFLAGS, -I/usr/local/include)
+ fi
+ if test "$clangvers" -ge 4; then # Xcode 4 / 10.7 and up
+ APR_ADDTO(CFLAGS, -Wno-deprecated-declarations)
+ fi
+ if test "$osxrel" -ge 6; then # 10.6 and up
+ APR_ADDTO(CFLAGS, -pipe -no-cpp-precomp)
+ APR_ADDTO(LDFLAGS, -pipe -bind_at_load)
+ APR_ADDTO(CXXFLAGS, -pipe)
+ fi
+ ;;
+ *-solaris2*)
+ if test "${enable_64}" = "yes"; then
+ APR_ADDTO(CPPFLAGS, [-I/opt/64/include])
+ APR_ADDTO(LDFLAGS, [-L/opt/64/lib -Wl,-rpath,/opt/64/lib])
+ APR_ADDTO(SWITCH_AM_CFLAGS, [-I/opt/64/include])
+ APR_ADDTO(SWITCH_AM_LDFLAGS, [-L/opt/64/lib -Wl,-rpath,/opt/64/lib])
+ else
+ APR_ADDTO(CPPFLAGS, [-I/opt/include])
+ APR_ADDTO(LDFLAGS, [-L/opt/lib -Wl,-rpath,/opt/lib])
+ APR_ADDTO(SWITCH_AM_CFLAGS, [-I/opt/include])
+ APR_ADDTO(SWITCH_AM_LDFLAGS, [-L/opt/lib -Wl,-rpath,/opt/lib])
+ fi
+ APR_ADDTO(SWITCH_AM_CFLAGS, -DPATH_MAX=2048 -D__EXTENSIONS__)
+ APR_ADDTO(SWITCH_AM_LDFLAGS, -lsendfile -lresolv -lsocket -lnsl -luuid)
+ APR_ADDTO(ESL_LDFLAGS, -lnsl -lsocket)
+ APR_ADDTO([PLATFORM_CORE_LIBS], [-ldl -lcrypt -lrt -lsendfile -lresolv -lsocket -lnsl -luuid])
+ ;;
+ *dragonfly*)
+ APR_ADDTO(CPPFLAGS, -I/usr/local/include)
+ APR_ADDTO(LDFLAGS, -L/usr/local/lib)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -I/usr/local/include)
+ ;;
+ *openbsd*)
+ APR_ADDTO(CPPFLAGS, -I/usr/local/include)
+ APR_ADDTO(LDFLAGS, -L/usr/local/lib -ltermcap)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -I/usr/local/include)
+ ;;
+ *netbsd*)
+ APR_ADDTO(CPPFLAGS, -I/usr/pkg/include)
+ APR_ADDTO(LDFLAGS, [-L/usr/pkg/lib -Wl,-rpath,/usr/pkg/lib])
+ APR_ADDTO(SWITCH_AM_CFLAGS, -I/usr/pkg/include)
+ ;;
+ *bsd*)
+ APR_ADDTO(CPPFLAGS, -I/usr/local/include)
+ APR_ADDTO(LDFLAGS, -L/usr/local/lib)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -I/usr/local/include)
+ APR_ADDTO([PLATFORM_CORE_LIBS], [-lcrypt -lrt])
+ ;;
+ *linux*)
+ APR_ADDTO([PLATFORM_CORE_LIBS], [-ldl -lcrypt -lrt])
+ ;;
+esac
+
+APR_REMOVEFROM(SWITCH_AM_CXXFLAGS, -std=c99)
+
+# check for usable system MD5 library
+AS_CASE([$host],
+ [*-solaris2*], [AC_CHECK_LIB(md5, MD5Init)],
+ [*-freebsd*], [AC_CHECK_LIB(md, MD5Init)],
+ [*-openbsd*|*-netbsd*], [AC_CHECK_FUNCS([MD5Init])])
+
+AC_CHECK_LIB(z, inflateReset, have_libz=yes, AC_MSG_ERROR([no usable zlib; please install zlib devel package or equivalent]))
+if test "x$have_libz" = "xyes" ; then
+APR_ADDTO([PLATFORM_CORE_LIBS], [-lz])
+fi
+
+PKG_CHECK_MODULES([MPG123], [libmpg123 >= 1.16.0],[
+ AM_CONDITIONAL([HAVE_MPG123],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MPG123],[false])])
+
+PKG_CHECK_MODULES([AMR], [opencore-amrnb >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_AMR],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AMR],[false])])
+
+PKG_CHECK_MODULES([AMRWB], [opencore-amrwb >= 0.1.0 vo-amrwbenc >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_AMRWB],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AMRWB],[false])])
+
+AC_CHECK_LIB(apr-1, apr_pool_mutex_set, use_system_apr=yes, use_system_apr=no)
+AM_CONDITIONAL([SYSTEM_APR],[test "${use_system_apr}" = "yes"])
+AC_CHECK_LIB(aprutil-1, apr_queue_pop_timeout, use_system_aprutil=yes, use_system_aprutil=no)
+AM_CONDITIONAL([SYSTEM_APRUTIL],[test "${use_system_aprutil}" = "yes"])
+
+save_LIBS="$LIBS"
+LIBS=
+AC_CHECK_LIB(jpeg, jpeg_std_error,, AC_MSG_ERROR([no usable libjpeg; please install libjpeg devel package or equivalent]))
+
+AC_CHECK_LIB(jbig, jbg_enc_out, have_libjbig=yes, have_libjbig=no)
+if test "x$have_libjbig" = "xyes" ; then
+SPANDSP_LA_JBIG="-ljbig $LIBS"
+AC_SUBST(SPANDSP_LA_JBIG)
+fi
+LIBS="$save_LIBS"
+
+AC_CHECK_LIB(lzma, lzma_code, have_liblzma=yes, have_liblzma=no)
+if test "x$have_liblzma" = "xyes" ; then
+SPANDSP_LA_LZMA="-llzma"
+AC_SUBST(SPANDSP_LA_LZMA)
+fi
+
+AC_CHECK_LIB(resolv, res_init, have_libresolv=yes, have_libresolv=no)
+if test "x$have_libresolv" = "xyes" ; then
+APR_ADDTO(SWITCH_AM_LDFLAGS, -lresolv)
+fi
+
+AC_SUBST(SWITCH_AM_CFLAGS)
+AC_SUBST(SWITCH_ANSI_CFLAGS)
+AC_SUBST(SWITCH_AM_CXXFLAGS)
+AC_SUBST(SWITCH_AM_CPPFLAGS)
+AC_SUBST(SWITCH_AM_LDFLAGS)
+AC_SUBST(ESL_LDFLAGS)
+AC_SUBST(PLATFORM_CORE_LDFLAGS)
+AC_SUBST(PLATFORM_CORE_LIBS)
+AC_SUBST(SOLINK)
+AC_SUBST(LIBTOOL_LIB_EXTEN)
+
+# Checks for header files.
+AC_HEADER_DIRENT
+AC_HEADER_STDC
+AC_CHECK_HEADERS([sys/types.h sys/resource.h sched.h wchar.h sys/filio.h sys/ioctl.h sys/prctl.h sys/select.h netdb.h execinfo.h sys/time.h])
+
+# Solaris 11 privilege management
+AS_CASE([$host],
+ [*-*-solaris2.11], [AC_CHECK_HEADER([priv.h], [AC_DEFINE([SOLARIS_PRIVILEGES],[1],[Solaris 11 privilege management])])]
+)
+
+
+if test x"$ac_cv_header_wchar_h" = xyes; then
+ HAVE_WCHAR_H_DEFINE=1
+else
+ HAVE_WCHAR_H_DEFINE=0
+fi
+AC_SUBST(HAVE_WCHAR_H_DEFINE)
+
+# Needed by Abyss on Solaris:
+
+if test x"$ac_cv_header_sys_filio_h" = xyes; then
+ HAVE_SYS_FILIO_H_DEFINE=1
+else
+ HAVE_SYS_FILIO_H_DEFINE=0
+fi
+AC_SUBST(HAVE_SYS_FILIO_H_DEFINE)
+
+# Needed by Abyss on Solaris:
+
+if test x"$ac_cv_header_sys_ioctl_h" = xyes; then
+ HAVE_SYS_IOCTL_H_DEFINE=1
+else
+ HAVE_SYS_IOCTL_H_DEFINE=0
+fi
+AC_SUBST(HAVE_SYS_IOCTL_H_DEFINE)
+
+if test x"$ac_cv_header_sys_select_h" = xyes; then
+ HAVE_SYS_SELECT_H_DEFINE=1
+else
+ HAVE_SYS_SELECT_H_DEFINE=0
+fi
+AC_SUBST(HAVE_SYS_SELECT_H_DEFINE)
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_C_INLINE
+AC_TYPE_SIZE_T
+AC_HEADER_TIME
+AC_STRUCT_TM
+
+# Checks for library functions.
+AC_PROG_GCC_TRADITIONAL
+AC_FUNC_MALLOC
+AC_TYPE_SIGNAL
+AC_FUNC_STRFTIME
+AC_CHECK_FUNCS([gethostname vasprintf mmap mlock mlockall usleep getifaddrs timerfd_create getdtablesize posix_openpt poll])
+AC_CHECK_FUNCS([sched_setscheduler setpriority setrlimit setgroups initgroups getrusage])
+AC_CHECK_FUNCS([wcsncmp setgroups asprintf setenv pselect gettimeofday localtime_r gmtime_r strcasecmp stricmp _stricmp])
+
+# Check availability and return type of strerror_r
+# (NOTE: apr-1-config sets -D_GNU_SOURCE at build-time, need to run the check with it too)
+save_CPPFLAGS="$CPPFLAGS"
+CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
+AC_FUNC_STRERROR_R
+CPPFLAGS="$save_CPPFLAGS"
+
+AX_HAVE_CPU_SET
+
+AC_CHECK_LIB(rt, clock_gettime, [AC_DEFINE(HAVE_CLOCK_GETTIME, 1, [Define if you have clock_gettime()])])
+AC_CHECK_LIB(rt, clock_getres, [AC_DEFINE(HAVE_CLOCK_GETRES, 1, [Define if you have clock_getres()])])
+AC_CHECK_LIB(rt, clock_nanosleep, [AC_DEFINE(HAVE_CLOCK_NANOSLEEP, 1, [Define if you have clock_nanosleep()])])
+AC_CHECK_LIB(pthread, pthread_setschedparam, [AC_DEFINE(HAVE_PTHREAD_SETSCHEDPARAM, 1, [Define if you have pthread_setschedparam()])])
+
+AC_CHECK_FUNC(socket, , AC_CHECK_LIB(socket, socket))
+
+AC_CHECK_FILE(/dev/ptmx, [AC_DEFINE(HAVE_DEV_PTMX, 1, [Define if you have /dev/ptmx])])
+AC_CHECK_LIB(util, openpty, [AC_DEFINE(HAVE_OPENPTY, 1, [Define if you have openpty()])])
+
+AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[
+#include <sys/types.h>
+#include <time.h>])
+
+AC_CHECK_MEMBERS([struct tm.tm_zone],,,[
+#include <sys/types.h>
+#include <time.h>])
+
+AC_CHECK_DECL([RLIMIT_MEMLOCK],
+ [AC_DEFINE([HAVE_RLIMIT_MEMLOCK],[1],[RLIMIT_MEMLOCK constant for setrlimit])],,
+ [#ifdef HAVE_SYS_RESOURCE_H
+ #include <sys/resource.h>
+ #endif])
+
+AC_CHECK_DECL([SCHED_RR],
+ [AC_DEFINE([HAVE_SCHED_RR],[1],[SCHED_RR constant for sched_setscheduler])],,
+ [#ifdef HAVE_SCHED_H
+ #include <sched.h>
+ #endif])
+
+AC_CHECK_DECL([SCHED_FIFO],
+ [AC_DEFINE([HAVE_SCHED_FIFO],[1],[SCHED_FIFO constant for sched_setscheduler])],,
+ [#ifdef HAVE_SCHED_H
+ #include <sched.h>
+ #endif])
+
+#
+# use mlockall only on linux (for now; if available)
+#
+if test "x${ac_cv_func_mlockall}" = "xyes"; then
+ AC_MSG_CHECKING([whether to use mlockall])
+ case "$host" in
+ *-linux-*)
+ AC_DEFINE([USE_MLOCKALL],[1],[Enable mlockall support])
+ AC_MSG_RESULT([yes])
+ USE_MLOCKALL=yes
+ ;;
+ *-freebsd*)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -fPIC)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -fPIC)
+ AC_MSG_RESULT([no, broken for non-root users])
+ ;;
+ *)
+ AC_MSG_RESULT([no])
+ ;;
+ esac
+
+ #
+ # setrlimit prerequisites
+ #
+ if test "x${USE_MLOCKALL}" = "xyes" -a \
+ "x${ac_cv_func_setrlimit}" = "xyes" -a \
+ "x${ac_cv_have_decl_RLIMIT_MEMLOCK}" = "xyes"
+ then
+ AC_DEFINE([USE_SETRLIMIT],[1],[Use setrlimit to disable mlock limit for non-root users])
+ fi
+fi
+
+#
+# sched_setscheduler + round-robin scheduler prerequisites
+#
+if test "x${ac_cv_func_sched_setscheduler}" = "xyes" -a \
+ "x${ac_cv_have_decl_SCHED_RR}" = "xyes"
+then
+ AC_DEFINE([USE_SCHED_SETSCHEDULER],[1],[Enable round-robin scheduler using sched_setscheduler])
+fi
+#
+# xmlrpc-c checks
+#
+
+AC_CHECK_FUNCS(setenv strtoll strtoull strtoq strtouq __strtoll __strtoull)
+
+HAVE_LIBWWW_SSL_DEFINE=0
+AC_SUBST(HAVE_LIBWWW_SSL_DEFINE)
+
+DIRECTORY_SEPARATOR="/"
+AC_SUBST(DIRECTORY_SEPARATOR)
+
+va_list_is_array=no
+AC_MSG_CHECKING(whether va_list is an array)
+AC_TRY_COMPILE([
+#include <stdarg.h>
+], [va_list list1, list2; list1 = list2;], ,
+va_list_is_array=yes)
+AC_MSG_RESULT($va_list_is_array)
+if test x"$va_list_is_array" = xyes; then
+ VA_LIST_IS_ARRAY_DEFINE=1
+else
+ VA_LIST_IS_ARRAY_DEFINE=0
+fi
+AC_SUBST(VA_LIST_IS_ARRAY_DEFINE)
+
+
+AC_MSG_CHECKING(whether compiler has __attribute__)
+AC_TRY_COMPILE(, [int x __attribute__((__unused__));],
+compiler_has_attribute=yes,
+compiler_has_attribute=no)
+AC_MSG_RESULT($compiler_has_attribute)
+if test x"$compiler_has_attribute" = xyes; then
+ ATTR_UNUSED="__attribute__((__unused__))"
+else
+ ATTR_UNUSED=
+fi
+AC_SUBST(ATTR_UNUSED)
+
+
+saved_CFLAGS="$CFLAGS"
+AC_CACHE_CHECK([whether compiler supports -Wdeclaration-after-statement], [ac_cv_gcc_declaration_after_statement], [
+CFLAGS="$CFLAGS -Wdeclaration-after-statement"
+AC_TRY_COMPILE([],[return 0;],[ac_cv_gcc_declaration_after_statement=yes],[ac_cv_gcc_declaration_after_statement=no])
+])
+AC_MSG_RESULT($ac_cv_gcc_declaration_after_statement)
+if test x"$ac_cv_gcc_declaration_after_statement" = xyes; then
+ APR_ADDTO(SWITCH_ANSI_CFLAGS, -Wdeclaration-after-statement)
+fi
+CFLAGS="$saved_CFLAGS"
+
+if test "x${ax_cv_c_compiler_vendor}" = "xclang" ; then
+ saved_CFLAGS="$CFLAGS"
+ # Next check added for Xcode 5 and systems with clang 5 llvm 3.3 or above, extended offset must be off
+ AC_CACHE_CHECK([whether compiler supports -Wextended-offsetof], [ac_cv_clang_extended_offsetof], [
+ CFLAGS="$CFLAGS -Wno-extended-offsetof"
+ AC_TRY_COMPILE([],[return 0;],[ac_cv_clang_extended_offsetof=yes],[ac_cv_clang_extended_offsetof=no])
+ ])
+ AC_MSG_RESULT($ac_cv_clang_extended_offsetof)
+ if test x"$ac_cv_clang_extended_offsetof" = xyes; then
+ APR_ADDTO(CFLAGS, -Wno-extended-offsetof)
+ fi
+ CFLAGS="$saved_CFLAGS"
+fi
+
+# Tested and fixed lot of modules, but some are untested. Will be added back when the core team decide it ready
+# Untested modules : mod_osp mod_soundtouch mod_sangoma_codec mod_dingaling mod_opal mod_h323 mod_khomp
+# mod_unimrcp mod_cepstral mod_erlang_event mod_snmp mod_perl mod_java mod_managed
+#
+#saved_CFLAGS="$CFLAGS"
+#AC_CACHE_CHECK([whether compiler supports -Wunused-but-set-variable], [ac_cv_gcc_unused_but_set_variable], [
+#CFLAGS="$CFLAGS -Wunused-but-set-variable"
+#AC_TRY_COMPILE([],[return 0;],[ac_cv_gcc_unused_but_set_variable=yes],[ac_cv_gcc_unused_but_set_variable=no])
+#])
+#AC_MSG_RESULT($ac_cv_gcc_unused_but_set_variable)
+#if test x"$ac_cv_gcc_unused_but_set_variable" = xyes; then
+# APR_ADDTO(SWITCH_ANSI_CFLAGS, -Wunused-but-set-variable)
+#fi
+#CFLAGS="$saved_CFLAGS"
+
+AC_C_BIGENDIAN(AC_DEFINE([SWITCH_BYTE_ORDER],__BIG_ENDIAN,[Big Endian]),AC_DEFINE([SWITCH_BYTE_ORDER],__LITTLE_ENDIAN,[Little Endian]))
+
+# Checks for integer size
+AC_CHECK_SIZEOF(char, 1)
+AC_CHECK_SIZEOF(int, 4)
+AC_CHECK_SIZEOF(long, 4)
+AC_CHECK_SIZEOF(short, 2)
+AC_CHECK_SIZEOF(long long, 8)
+AC_TYPE_SIZE_T
+AC_CHECK_TYPE(ssize_t, int)
+
+# Checks for pointer size
+AC_CHECK_SIZEOF(void*, 4)
+
+if test "x$ac_cv_sizeof_voidp" != "x"; then
+ voidp_size=$ac_cv_sizeof_voidp
+else
+ AC_ERROR([Cannot determine size of void*])
+fi
+
+if test "$ac_cv_sizeof_short" = "2"; then
+ short_value=short
+fi
+if test "$ac_cv_sizeof_int" = "4"; then
+ int_value=int
+fi
+
+if test "$ac_cv_sizeof_int" = "8"; then
+ int64_t_fmt='#define SWITCH_INT64_T_FMT "d"'
+ uint64_t_fmt='#define SWITCH_UINT64_T_FMT "u"'
+ int64_value="int"
+ long_value=int
+elif test "$ac_cv_sizeof_long" = "8"; then
+ int64_t_fmt='#define SWITCH_INT64_T_FMT "ld"'
+ uint64_t_fmt='#define SWITCH_UINT64_T_FMT "lu"'
+ int64_value="long"
+ long_value=long
+ case "$host" in
+ *pc-solaris2*)
+ ;;
+ sparc-*-solaris2*)
+ ;;
+ *-solaris2*|*apple-darwin*|*-openbsd*)
+ if test "$ac_cv_sizeof_long_long" = "8"; then
+ int64_t_fmt='#define SWITCH_INT64_T_FMT "lld"'
+ uint64_t_fmt='#define SWITCH_UINT64_T_FMT "llu"'
+ int64_value="long long"
+ long_value="long long"
+ fi
+ ;;
+ esac
+elif test "$ac_cv_sizeof_long_long" = "8"; then
+ int64_t_fmt='#define SWITCH_INT64_T_FMT "lld"'
+ uint64_t_fmt='#define SWITCH_UINT64_T_FMT "llu"'
+ int64_value="long long"
+ long_value="long long"
+elif test "$ac_cv_sizeof_longlong" = "8"; then
+ int64_t_fmt='#define SWITCH_INT64_T_FMT "qd"'
+ uint64_t_fmt='#define SWITCH_UINT64_T_FMT "qu"'
+ int64_value="__int64"
+ long_value="__int64"
+else
+ AC_ERROR([could not detect a 64-bit integer type])
+fi
+
+if test "$ac_cv_type_size_t" = "yes"; then
+ size_t_value="size_t"
+else
+ size_t_value="switch_int32_t"
+fi
+
+if test "$ac_cv_type_ssize_t" = "yes"; then
+ ssize_t_value="ssize_t"
+else
+ ssize_t_value="switch_int32_t"
+fi
+
+APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], ssize_t, 8)
+
+if test "$ac_cv_sizeof_ssize_t" = "$ac_cv_sizeof_int"; then
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "d"'
+elif test "$ac_cv_sizeof_ssize_t" = "$ac_cv_sizeof_long"; then
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "ld"'
+else
+ ssize_t_fmt='#error Can not determine the proper size for ssize_t'
+fi
+
+APR_CHECK_SIZEOF_EXTENDED([#include <stddef.h>], size_t, 8)
+
+if test "$ac_cv_sizeof_size_t" = "$ac_cv_sizeof_int"; then
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "d"'
+elif test "$ac_cv_sizeof_size_t" = "$ac_cv_sizeof_long"; then
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "ld"'
+else
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "zu"'
+fi
+
+# Basically, we have tried to figure out the correct format strings
+# for SWITCH types which vary between platforms, but we don't always get
+# it right. If you find that we don't get it right for your platform,
+# you can override our decision below.
+# NOTE: borrowed much of this logic from apr.
+case $host in
+ s390*linux*)
+ # uniquely, the 31-bit Linux/s390 uses "unsigned long int"
+ # for size_t rather than "unsigned int":
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "lu"'
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "ld"'
+ ;;
+ *-os2*)
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "lu"'
+ ;;
+ *-openbsd*)
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "ld"'
+ ;;
+ *aix4*|*aix5*)
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "ld"'
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "ld"'
+ ;;
+ *beos*)
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "ld"'
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "ld"'
+ ;;
+ *apple-darwin*)
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "ld"'
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "lu"'
+ ;;
+esac
+
+AC_SUBST(voidp_size)
+AC_SUBST(short_value)
+AC_SUBST(int_value)
+AC_SUBST(long_value)
+AC_SUBST(int64_value)
+AC_SUBST(size_t_value)
+AC_SUBST(ssize_t_value)
+AC_SUBST(int64_t_fmt)
+AC_SUBST(uint64_t_fmt)
+AC_SUBST(ssize_t_fmt)
+AC_SUBST(size_t_fmt)
+
+case $host in
+ *-openbsd*)
+ # OpenBSD's gunzip and friends don't like -d because its redundant, only gzip does
+ AC_PATH_PROGS(ZCAT, gzip)
+ ;;
+ *)
+ AC_PATH_PROGS(ZCAT, gunzip gzcat gzip zcat)
+ ;;
+esac
+
+AC_PATH_PROGS(BZIP, bzip2)
+AC_PATH_PROGS(XZ, xz)
+AC_PATH_PROGS(TAR, gtar tar)
+AC_PATH_PROGS(WGET, wget)
+AC_PATH_PROGS(CURL, curl)
+GETLIB="cd $switch_srcdir/libs && ${SHELL} $switch_builddir/build/getlib.sh"
+AC_SUBST(GETLIB)
+GETG729="cd $switch_srcdir/libs && ${SHELL} $switch_builddir/build/getg729.sh"
+AC_SUBST(GETG729)
+GETSOUNDS="${SHELL} $switch_builddir/build/getsounds.sh"
+AC_SUBST(GETSOUNDS)
+
+case $host in
+ *-darwin*)
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/curl/lib/pkgconfig
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/sqlite/lib/pkgconfig/
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/ldns/lib/pkgconfig/
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/portaudio/lib/pkgconfig/
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/ffmpeg/lib/pkgconfig/
+ ;;
+esac
+
+if ! (test -x "$PKG_CONFIG" || test -x "$(which pkg-config)"); then
+ AC_MSG_ERROR([You need to install pkg-config to configure FreeSWITCH.])
+fi
+
+# temporary workaround for Debian libldns-dev package bug
+if test "$cross_compiling" != "yes" && test -f /usr/lib/pkg-config/libldns.pc; then
+ path_push_unique PKG_CONFIG_PATH /usr/lib/pkg-config
+fi
+
+module_enabled() {
+ grep -v -e "\#" -e "^\$" modules.conf | sed 's/|.*//' | sed -e "s|^.*/||" | grep "^${1}\$" >/dev/null
+}
+
+AC_ARG_WITH(png,
+ [AS_HELP_STRING([--without-png],
+ [disable support for libpng])],
+ [with_png="$withval"],
+ [with_png="yes"])
+if test "$with_png" = "yes"; then
+ PKG_CHECK_MODULES([LIBPNG], [libpng >= 1.6.16],[
+ AM_CONDITIONAL([HAVE_PNG],[true])],[
+ PKG_CHECK_MODULES([LIBPNG], [libpng16 >= 1.6.16],[
+ AM_CONDITIONAL([HAVE_PNG],[true])],[
+ PKG_CHECK_MODULES([LIBPNG], [libpng >= 1.2.49],[
+ AM_CONDITIONAL([HAVE_PNG],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_PNG],[false])])])])
+else
+ AM_CONDITIONAL([HAVE_PNG],[false])
+fi
+
+AC_ARG_WITH(freetype,
+ [AS_HELP_STRING([--without-freetype],
+ [disable support for freetype])],
+ [with_freetype="$withval"],
+ [with_freetype="yes"])
+if test "$with_freetype" = "yes"; then
+ PKG_CHECK_MODULES([FREETYPE], [freetype2 >= 2.4.9],[
+ AM_CONDITIONAL([HAVE_FREETYPE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_FREETYPE],[false])])
+else
+ AM_CONDITIONAL([HAVE_FREETYPE],[false])
+fi
+
+PKG_CHECK_MODULES([GUMBO], [gumbo >= 0.10.1],[
+ AM_CONDITIONAL([HAVE_GUMBO],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_GUMBO],[false])])
+
+PKG_CHECK_MODULES([FVAD], [libfvad >= 1.0],[
+ AM_CONDITIONAL([HAVE_FVAD],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_FVAD],[false])])
+
+PKG_CHECK_MODULES([TPL], [libtpl >= 1.5],[
+ AC_DEFINE([HAVE_LIBTPL],[1],[Define to 1 if you have libtpl])],[
+ AC_MSG_RESULT([no])])
+
+PKG_CHECK_MODULES([SQLITE], [sqlite3 >= 3.6.20])
+PKG_CHECK_MODULES([CURL], [libcurl >= 7.19])
+PKG_CHECK_MODULES([PCRE], [libpcre >= 7.8])
+PKG_CHECK_MODULES([SPEEX], [speex >= 1.2rc1 speexdsp >= 1.2rc1])
+PKG_CHECK_MODULES([YAML], [yaml-0.1 >= 0.1.4],[
+ AM_CONDITIONAL([HAVE_YAML],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_YAML],[false])])
+PKG_CHECK_MODULES([PORTAUDIO], [portaudio-2.0 >= 19],[
+ AM_CONDITIONAL([HAVE_PORTAUDIO],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_PORTAUDIO],[false])])
+PKG_CHECK_MODULES([LDNS], [libldns-fs >= 1.6.6],[
+ AM_CONDITIONAL([HAVE_LDNS],[true])],[
+PKG_CHECK_MODULES([LDNS], [libldns >= 1.6.6],[
+ AM_CONDITIONAL([HAVE_LDNS],[true])],[
+ AC_CHECK_LIB([ldns], [ldns_str2rdf_a], [LDNS_LIBS=-lldns])
+ AS_IF([test -z "$LDNS_LIBS"],[
+ if module_enabled mod_enum; then
+ AC_MSG_ERROR([You need to either install libldns-dev or disable mod_enum in modules.conf])
+ else
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_LDNS],[false])
+ fi],[
+ AM_CONDITIONAL([HAVE_LDNS],[true])])])])
+PKG_CHECK_MODULES([SNDFILE], [sndfile >= 1.0.20],[
+ AM_CONDITIONAL([HAVE_SNDFILE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SNDFILE],[false])])
+
+PKG_CHECK_MODULES([MPG123], [libmpg123 >= 1.16.0],[
+ AM_CONDITIONAL([HAVE_MPG123],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MPG123],[false])])
+
+PKG_CHECK_MODULES([SHOUT], [shout >= 2.2.2],[
+ AM_CONDITIONAL([HAVE_SHOUT],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SHOUT],[false])])
+
+mp3lame=false
+AC_CHECK_LIB([mp3lame], [lame_init],[
+ AC_CHECK_HEADER([lame/lame.h],[
+ mp3lame=true
+ AC_SUBST([MP3LAME_LIBS], [-lmp3lame])
+ AC_SUBST([MP3LAME_CFLAGS], [$CPPFLAGS])])])
+AM_CONDITIONAL([HAVE_MP3LAME],[$mp3lame])
+
+PKG_CHECK_MODULES([AVCODEC], [libavcodec >= 53.35.0],[
+ AM_CONDITIONAL([HAVE_AVCODEC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AVCODEC],[false])])
+
+PKG_CHECK_MODULES([X264], [x264 >= 0.142.2431],[
+ AM_CONDITIONAL([HAVE_X264],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_X264],[false])])
+
+PKG_CHECK_MODULES([AVFORMAT], [libavformat >= 53.21.1],[
+ AM_CONDITIONAL([HAVE_AVFORMAT],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AVFORMAT],[false])])
+
+PKG_CHECK_MODULES([AVUTIL], [libavutil >= 54.3.0],[
+ AM_CONDITIONAL([HAVE_AVUTIL],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AVUTIL],[false])])
+
+PKG_CHECK_MODULES([AVRESAMPLE], [libavresample >= 2.1.0],[
+ AM_CONDITIONAL([HAVE_AVRESAMPLE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AVRESAMPLE],[false])])
+
+PKG_CHECK_MODULES([SWRESAMPLE], [libswresample >= 2.1.0],[
+ AM_CONDITIONAL([HAVE_SWRESAMPLE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SWRESAMPLE],[false])])
+
+PKG_CHECK_MODULES([SWSCALE], [libswscale >= 3.0.0],[
+ AM_CONDITIONAL([HAVE_SWSCALE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SWSCALE],[false])])
+
+PKG_CHECK_MODULES([VLC], [libvlc >= 2.1.0],[
+ AM_CONDITIONAL([HAVE_VLC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_VLC],[false])])
+
+PKG_CHECK_MODULES([OPENCV], [opencv >= 2.4.5],[
+ AM_CONDITIONAL([HAVE_OPENCV],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_OPENCV],[false])])
+
+PKG_CHECK_MODULES([OPUSFILE_DECODE], [opusfile >= 0.5],[
+ AM_CONDITIONAL([HAVE_OPUSFILE_DECODE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_OPUSFILE_DECODE],[false])])
+PKG_CHECK_MODULES([OPUSFILE_ENCODE], [libopusenc >= 0.1],[
+ AM_CONDITIONAL([HAVE_OPUSFILE_ENCODE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_OPUSFILE_ENCODE],[false])])
+
+
+PKG_CHECK_MODULES([MAGICK], [ImageMagick >= 6.0.0],[
+ AM_CONDITIONAL([HAVE_MAGICK],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MAGICK],[false])])
+
+PKG_CHECK_MODULES([MAGICK7], [ImageMagick >= 7.0.0],[
+ AM_CONDITIONAL([HAVE_MAGICK7],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MAGICK7],[false])])
+
+PKG_CHECK_MODULES([SILK], [silk >= 1.0.8],[
+ AM_CONDITIONAL([HAVE_SILK],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SILK],[false])])
+
+PKG_CHECK_MODULES([BROADVOICE], [broadvoice >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_BROADVOICE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_BROADVOICE],[false])])
+
+PKG_CHECK_MODULES([ILBC], [ilbc2 >= 0.0.1],[
+ AM_CONDITIONAL([HAVE_ILBC],[true])],[
+ PKG_CHECK_MODULES([ILBC], [ilbc >= 0.0.1],[
+ AM_CONDITIONAL([HAVE_ILBC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_ILBC],[false])])])
+
+PKG_CHECK_MODULES([G7221], [g722_1 >= 0.2.0],[
+ AM_CONDITIONAL([HAVE_G7221],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_G7221],[false])])
+
+codec2="true"
+PKG_CHECK_MODULES([CODEC2], [codec2 >= 0.5],[],[
+ AC_CHECK_LIB([codec2], [codec2_create],[
+ AC_CHECK_HEADERS([codec2/codec2.h],[
+ CODEC2_LIBS="-lcodec2"
+ CODEC2_CFLAGS=""
+ ], [
+ codec2="false"
+ if module_enabled mod_codec2; then
+ AC_MSG_ERROR([You must install libcodec2-dev to build mod_codec2])
+ else
+ AC_MSG_RESULT([no])
+ fi
+ ])
+ ])
+])
+
+AM_CONDITIONAL([HAVE_CODEC2],[$codec2])
+
+
+PKG_CHECK_MODULES([OPUS], [opus >= 1.1],[
+ AM_CONDITIONAL([HAVE_OPUS],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_OPUS],[false])])
+
+PKG_CHECK_MODULES([SOUNDTOUCH], [soundtouch >= 1.7.0],[
+ AM_CONDITIONAL([HAVE_SOUNDTOUCH],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SOUNDTOUCH],[false])])
+
+flite="true"
+PKG_CHECK_MODULES([FLITE], [flite >= 2],[],[
+ AC_CHECK_LIB([flite], [flite_init],[
+ AC_CHECK_HEADERS([flite/flite.h],[
+ FLITE_LIBS="-lflite -lflite_cmu_grapheme_lang -lflite_cmu_grapheme_lex -lflite_cmu_indic_lang -lflite_cmu_indic_lex -lflite_cmulex -lflite_cmu_time_awb -lflite_cmu_us_awb -lflite_cmu_us_kal16 -lflite_cmu_us_kal -lflite_cmu_us_rms -lflite_cmu_us_slt -lflite_usenglish"
+ FLITE_CFLAGS=""
+ ], [
+ flite="false"
+ if module_enabled mod_flite; then
+ AC_MSG_ERROR([You must install libflite-dev to build mod_flite])
+ else
+ AC_MSG_RESULT([no])
+ fi
+ ])
+ ])
+])
+
+AM_CONDITIONAL([HAVE_FLITE],[$flite])
+
+PKG_CHECK_MODULES([MONGOC], [libmongoc-1.0 >= 1.0.8],[
+ AM_CONDITIONAL([HAVE_MONGOC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MONGOC],[false])])
+
+PKG_CHECK_MODULES([MEMCACHED], [libmemcached >= 0.31],[
+ AM_CONDITIONAL([HAVE_MEMCACHED],[true])
+ MEMCACHED_LIBS="${MEMCACHED_LIBS} -lpthread"
+ save_LIBS="${LIBS}"
+ save_CPPFLAGS="${CPPFLAGS}"
+ LIBS="${MEMCACHED_LIBS}"
+ CPPFLAGS="${MEMCACHED_CFLAGS}"
+ AC_CHECK_FUNCS([memcached_server_name memcached_stat_execute])
+ AC_CHECK_TYPES([memcached_instance_st*],,, [[#include <libmemcached/memcached.h>]])
+ LIBS="${save_LIBS}"
+ CPPFLAGS="${save_CPPFLAGS}"
+],[
+ AC_MSG_RESULT([no])
+ AM_CONDITIONAL([HAVE_MEMCACHED],[false])
+])
+
+PKG_CHECK_MODULES([V8FS_STATIC], [v8-6.1_static >= 6.1.298],[
+ AM_CONDITIONAL([HAVE_V8FS],[true])],[
+ PKG_CHECK_MODULES([V8FS_STATIC], [v8fs_static >= 6.1.298],[
+ AM_CONDITIONAL([HAVE_V8FS],[true])],[
+ PKG_CHECK_MODULES([V8FS_STATIC], [v8 >= 6.1.298],[
+ AM_CONDITIONAL([HAVE_V8FS],[true])],[
+ if module_enabled mod_v8; then
+ AC_MSG_ERROR([You need to either install libv8-6.1-dev (>= 6.1.298), libv8fs-dev (>= 6.1.298) or disable mod_v8 in modules.conf])
+ else
+ AC_MSG_RESULT([no])
+ AM_CONDITIONAL([HAVE_V8FS],[false])
+ fi
+ ])
+ ])
+])
+
+PKG_CHECK_MODULES([KS], [libks >= 1.1.0],[
+ AM_CONDITIONAL([HAVE_KS],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_KS],[false])])
+
+PKG_CHECK_MODULES([SIGNALWIRE_CLIENT], [signalwire_client >= 1.0.0],[
+ AM_CONDITIONAL([HAVE_SIGNALWIRE_CLIENT],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SIGNALWIRE_CLIENT],[false])])
+
+PKG_CHECK_MODULES([AMQP], [librabbitmq >= 0.5.2],[
+ AM_CONDITIONAL([HAVE_AMQP],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AMQP],[false])])
+
+PKG_CHECK_MODULES([H2O], [libh2o-evloop >= 0.11.0],[
+ AM_CONDITIONAL([HAVE_H2O],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_H2O],[false])])
+
+PKG_CHECK_MODULES([BROTLIENC], [libbrotlienc >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_BROTLIENC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_BROTLIENC],[false])])
+
+PKG_CHECK_MODULES([BROTLIDEC], [libbrotlidec >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_BROTLIDEC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_BROTLIDEC],[false])])
+
+PKG_CHECK_MODULES([TAP], [tap >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_TAP],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_TAP],[false])])
+
+PKG_CHECK_MODULES([SMPP34], [libsmpp34 >= 1.10],[
+ AM_CONDITIONAL([HAVE_SMPP34],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SMPP34],[false])])
+
+PKG_CHECK_MODULES([HIREDIS], [hiredis >= 0.10.0],[
+ AM_CONDITIONAL([HAVE_HIREDIS],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_HIREDIS],[false])])
+
+AC_ARG_ENABLE(core-libedit-support,
+ [AS_HELP_STRING([--disable-core-libedit-support], [Compile without libedit Support])])
+
+AS_IF([test "x$enable_core_libedit_support" != "xno"],[
+ PKG_CHECK_MODULES([LIBEDIT], [libedit >= 2.11],,[
+ AC_MSG_RESULT([no])
+ AC_CHECK_LIB([edit], [el_line], [LIBEDIT_LIBS=-ledit])
+ AC_CHECK_LIB([edit], [el_cursor], [ac_cv_has_el_cursor=yes])
+ AC_CHECK_HEADER([histedit.h], [], [unset LIBEDIT_LIBS])
+ AS_IF([test "x$LIBEDIT_LIBS" = "x"], [
+ AC_MSG_ERROR([You need to either install libedit-dev (>= 2.11) or configure with --disable-core-libedit-support])
+ ])])])
+
+dnl DH: Added for including libwebsockets
+AC_ARG_WITH(lws,
+ [AS_HELP_STRING([--with-lws],
+ [enable support for libwebsockets])],
+ [with_lws="$withval"],
+ [with_lws="no"])
+if test "$with_lws" = "yes"; then
+ PKG_CHECK_MODULES([LWS], [libwebsockets], [
+ AM_CONDITIONAL([HAVE_LWS],[true])], [
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_LWS],[false])])
+else
+ AM_CONDITIONAL([HAVE_LWS],[false])
+fi
+
+dnl DH: Added for including google protobuf libs
+AC_ARG_WITH(extra,
+ [AS_HELP_STRING([--with-extra],
+ [enable support for extra modules which require google rpc (libgrpc++ and libgrpc)])],
+ [with_extra="$withval"],
+ [with_extra="no"])
+if test "$with_extra" = "yes"; then
+ PKG_CHECK_MODULES([GRPC], [grpc++ grpc], [
+ AM_CONDITIONAL([HAVE_GRPC],[true])], [
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_GRPC],[false])])
+else
+ AM_CONDITIONAL([HAVE_GRPC],[false])
+fi
+
+dnl ---------------------------------------------------------------------------
+dnl - OpenLDAP SDK
+dnl ---------------------------------------------------------------------------
+
+AC_CHECK_LIB(ldap, ldap_search, with_ldap=yes)
+dnl Check for other libraries we need to link with to get the main routines.
+test "$with_ldap" != "yes" && { AC_CHECK_LIB(ldap, ldap_open, [with_ldap=yes with_ldap_lber=yes], , -llber) }
+test "$with_ldap" != "yes" && { AC_CHECK_LIB(ldap, ldap_open, [with_ldap=yes with_ldap_lber=yes with_ldap_krb=yes], , -llber -lkrb) }
+test "$with_ldap" != "yes" && { AC_CHECK_LIB(ldap, ldap_open, [with_ldap=yes with_ldap_lber=yes with_ldap_krb=yes with_ldap_des=yes], , -llber -lkrb -ldes) }
+test "$with_ldap_lber" != "yes" && { AC_CHECK_LIB(lber, ber_pvt_opt_on, with_ldap_lber=yes) }
+
+if test "$with_ldap" = "yes"; then
+ if test "$with_ldap_des" = "yes" ; then
+ OPENLDAP_LIBS="${OPENLDAP_LIBS} -ldes"
+ fi
+ if test "$with_ldap_krb" = "yes" ; then
+ OPENLDAP_LIBS="${OPENLDAP_LIBS} -lkrb"
+ fi
+ if test "$with_ldap_lber" = "yes" ; then
+ OPENLDAP_LIBS="${OPENLDAP_LIBS} -llber"
+ fi
+ OPENLDAP_LIBS="${OPENLDAP_LIBS} -lldap"
+fi
+
+AM_CONDITIONAL([HAVE_LDAP],[test "x$with_ldap" = "xyes"])
+
+AC_SUBST(OPENLDAP_LIBS)
+
+AS_IF([test "x$enable_core_libedit_support" != "xno"], [
+ # If making changes here, don't forget to run autoheader and
+ # update libs/esl/src/include/esl_config_auto.h.in manually.
+ AC_DEFINE([HAVE_LIBEDIT], [1], [Define to 1 if libedit is available])
+if test x$ac_cv_has_el_cursor = xyes; then
+ AC_DEFINE([HAVE_EL_CURSOR], [1], [Define to 1 if you have libedit el_cursor support])
+fi
+ save_LIBS="${LIBS}"
+ save_CPPFLAGS="${CPPFLAGS}"
+ LIBS="${LIBEDIT_LIBS}"
+ CPPFLAGS="${LIBEDIT_CFLAGS}"
+ AC_CHECK_DECLS([EL_PROMPT_ESC, EL_REFRESH],,, [[#include <histedit.h>]])
+ AC_CHECK_FUNCS([el_wset])
+ LIBS="${save_LIBS}"
+ CPPFLAGS="${save_CPPFLAGS}"
+])
+
+SAC_OPENSSL
+
+if test x$HAVE_OPENSSL = x1; then
+ openssl_CFLAGS="$openssl_CFLAGS -DHAVE_OPENSSL";
+ APR_ADDTO(SWITCH_AM_CFLAGS, -DHAVE_OPENSSL)
+ AC_CHECK_LIB(ssl, SSL_CTX_set_tlsext_use_srtp, AC_DEFINE_UNQUOTED(HAVE_OPENSSL_DTLS_SRTP, 1, HAVE_OPENSSL_DTLS_SRTP), AC_MSG_ERROR([OpenSSL >= 1.0.1e and associated development headers required]))
+ AC_CHECK_LIB(ssl, DTLSv1_method, AC_DEFINE_UNQUOTED(HAVE_OPENSSL_DTLS, 1, HAVE_OPENSSL_DTLS), AC_MSG_ERROR([OpenSSL >= 1.0.1e and associated development headers required]))
+ AC_CHECK_LIB(ssl, DTLSv1_2_method, AC_DEFINE_UNQUOTED(HAVE_OPENSSL_DTLSv1_2_method, 1, [DTLS version 1.2 is available]))
+else
+ AC_MSG_ERROR([OpenSSL >= 1.0.1e and associated development headers required])
+fi
+
+AX_CHECK_JAVA
+
+AM_CONDITIONAL([HAVE_ODBC],[test "x$enable_core_odbc_support" != "xno"])
+AM_CONDITIONAL([HAVE_MYSQL],[test "$found_mysql" = "yes"])
+
+#
+# perl checks
+#
+
+AC_CHECK_PROG(PERL,perl,[ac_cv_have_perl=yes],[ac_cv_have_perl=no])
+
+# -a "x$ac_cv_have_EXTERN_h" != "xno"
+
+if test "x$ac_cv_have_perl" != "xno"; then
+ PERL=perl
+ PERL_SITEDIR="`$PERL -MConfig -e 'print $Config{archlib}'`"
+ PERL_LIBDIR="-L`$PERL -MConfig -e 'print $Config{archlib}'`/CORE"
+ PERL_LIBS="`$PERL -MConfig -e 'print $Config{libs}'`"
+ PERL_CFLAGS="-w -DMULTIPLICITY `$PERL -MExtUtils::Embed -e ccopts | sed -e 's|-arch x86_64 -arch i386||'` -DEMBED_PERL"
+ PERL_LDFLAGS="`$PERL -MExtUtils::Embed -e ldopts| sed -e 's|-arch x86_64 -arch i386||'`"
+ PERL_INC="`$PERL -MExtUtils::Embed -e perl_inc`"
+
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$PERL_CFLAGS"
+ AC_CHECK_HEADER([EXTERN.h], [ac_cv_have_EXTERN_h=yes], [ac_cv_have_EXTERN_h=no], [[#include <math.h>
+# include <EXTERN.h>]])
+ CFLAGS="$save_CFLAGS"
+
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$PERL_LDFLAGS"
+ AC_CHECK_LIB([perl], [perl_alloc], ac_cv_use_libperl=yes, ac_cv_use_libperl=no)
+ LDFLAGS="$save_LDFLAGS"
+
+ AC_SUBST(PERL_SITEDIR)
+ AC_SUBST(PERL_LIBDIR)
+ AC_SUBST(PERL_LIBS)
+ AC_SUBST(PERL_CFLAGS)
+ AC_SUBST(PERL_LDFLAGS)
+ AC_SUBST(PERL_INC)
+fi
+
+AM_CONDITIONAL([HAVE_PERL],[test "x$ac_cv_have_perl" != "xno" -a "x$ac_cv_have_EXTERN_h" != "xno" -a "x$ac_cv_use_libperl" != "xno"])
+
+#
+# php checks
+#
+
+AC_CHECK_PROG(PHP,php,[ac_cv_have_php=yes],[ac_cv_have_php=no])
+AC_CHECK_PROG(PHP_CONFIG,php-config,[ac_cv_have_php_config=yes],[ac_cv_have_php_config=no])
+AM_CONDITIONAL([HAVE_PHP],[test "x$ac_cv_have_php" != "xno" -a "x$ac_cv_have_php_config" != "xno"])
+
+if test "x$ac_cv_have_php" != "xno" -a "x$ac_cv_have_php_config" != "xno"; then
+ PHP=php
+ PHP_CONFIG=php-config
+ PHP_LDFLAGS="`$PHP_CONFIG --ldflags`"
+ PHP_LIBS="`$PHP_CONFIG --libs | sed -r 's/ ?-l(bz2|pcre2-8|xml2|gssapi_krb5|krb5|k5crypto|com_err|history|z|readline|gmp|ssl|crypto|argon2|sodium)//g'`"
+ PHP_EXT_DIR="`$PHP_CONFIG --extension-dir`"
+ PHP_INC_DIR="`$PHP -r 'echo ini_get("include_path");' | cut -d: -f2`"
+ PHP_INI_DIR="`$PHP_CONFIG --configure-options | tr " " "\n" | grep -- --with-config-file-scan-dir | cut -f2 -d=`"
+ PHP_CFLAGS="`$PHP_CONFIG --includes`"
+ AC_SUBST(PHP_LDFLAGS)
+ AC_SUBST(PHP_LIBS)
+ AC_SUBST(PHP_EXT_DIR)
+ AC_SUBST(PHP_INC_DIR)
+ AC_SUBST(PHP_INI_DIR)
+ AC_SUBST(PHP_CFLAGS)
+fi
+
+#
+# Python checks for mod_python
+#
+AC_ARG_WITH(
+ [python],
+ [AS_HELP_STRING([--with-python], [Use system provided version of python (default: try)])],
+ [with_python="$withval"],
+ [with_python="try"]
+)
+
+if test "$with_python" != "no"
+then
+ save_CFLAGS="$CFLAGS"
+ save_LIBS="$LIBS"
+
+ if test "$with_python" != "yes" -a "$with_python" != "try" ; then
+ AC_MSG_CHECKING([for python])
+ if test ! -x "$with_python" ; then
+ AC_MSG_ERROR([Specified python does not exist or is not executable: $with_python])
+ fi
+ AC_MSG_RESULT([$with_python])
+ AC_SUBST([PYTHON], ["$with_python"])
+ else
+ AC_PATH_PROG([PYTHON], ["python"], ["no"], ["$PATH:/usr/bin:/usr/local/bin"])
+ fi
+
+ if test "$PYTHON" != "no" ; then
+ AC_MSG_CHECKING([python version])
+ PYTHON_VER="`$PYTHON -V 2>&1 | cut -d' ' -f2`"
+
+ if test -z "$PYTHON_VER" ; then
+ AC_MSG_ERROR([Unable to detect python version])
+ fi
+ AC_MSG_RESULT([$PYTHON_VER])
+
+ AC_MSG_CHECKING([for python distutils])
+ python_result="`$PYTHON -c 'import distutils;' 2>&1`"
+ if test -z "$python_result" ; then
+ python_has_distutils="yes"
+ else
+ python_has_distutils="no"
+ fi
+ AC_MSG_RESULT([$python_has_distutils])
+
+ if test "$python_has_distutils" != "no" ; then
+ AC_MSG_CHECKING([location of site-packages])
+
+ PYTHON_SITE_DIR="`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(0));'`"
+
+ if test -z "$PYTHON_SITE_DIR" ; then
+ AC_MSG_ERROR([Unable to detect python site-packages path])
+ elif test ! -d "$PYTHON_SITE_DIR" ; then
+ AC_MSG_ERROR([Path $PYTHON_SITE_DIR returned by python does not exist!])
+ fi
+ AC_MSG_RESULT([$PYTHON_SITE_DIR])
+ AC_SUBST([PYTHON_SITE_DIR], [$PYTHON_SITE_DIR])
+
+ #
+ # python distutils found, get settings from python directly
+ #
+ PYTHON_CFLAGS="`$PYTHON -c 'from distutils import sysconfig; flags = [[\"-I\" + sysconfig.get_python_inc(0), \"-I\" + sysconfig.get_python_inc(1), \" \".join(sysconfig.get_config_var(\"CFLAGS\").split())]]; print(\" \".join(flags));' | sed -e 's/-arch i386//g;s/-arch x86_64//g'`"
+ PYTHON_LDFLAGS="`$PYTHON -c 'from distutils import sysconfig; libs = sysconfig.get_config_var(\"LIBS\").split() + sysconfig.get_config_var(\"SYSLIBS\").split(); libs.append(\"-lpython\"+sysconfig.get_config_var(\"VERSION\")); print(\" \".join(libs));'`"
+ PYTHON_LIB="`$PYTHON -c 'from distutils import sysconfig; print(\"python\" + sysconfig.get_config_var(\"VERSION\"));'`"
+ PYTHON_LIBDIR="`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_config_var(\"LIBDIR\"));'`"
+
+ # handle python being installed into /usr/local
+ AC_MSG_CHECKING([python libdir])
+ if test -z "`echo $PYTHON_LIBDIR | grep "/usr/lib"`" ; then
+ PYTHON_LDFLAGS="-L$PYTHON_LIBDIR $PYTHON_LDFLAGS"
+ LIBS="-L$PYTHON_LIBDIR $LIBS"
+ fi
+ AC_MSG_RESULT([$PYTHON_LIBDIR])
+
+ # check libpython
+ AC_CHECK_LIB([$PYTHON_LIB], [main], [has_libpython="yes"], [has_libpython="no"])
+
+ if test "$has_libpython" = "no" ; then
+ AS_IF([test "$with_python" = "try"],
+ [AC_MSG_WARN([$PYTHON_LIB is unusable])],
+ [AC_MSG_ERROR([$PYTHON_LIB is unusable])]
+ )
+ fi
+
+ # check whether system libpython is usable and has threads support
+ CFLAGS="$PYTHON_CFLAGS"
+ LIBS="$PYTHON_LDFLAGS"
+ AC_CHECK_FUNC([PyThread_init_thread], [python_has_threads="yes"], [python_has_threads="no"])
+
+ if test "$python_has_threads" = "no"; then
+ AS_IF([test "$with_python" = "try"],
+ [AC_MSG_WARN([Your python lacks threads support, can not build mod_python])],
+ [AC_MSG_ERROR([Your python lacks threads support, can not build mod_python])]
+ )
+ else
+ AC_MSG_NOTICE([Your python seems OK, do not forget to enable mod_python in modules.conf])
+ AC_SUBST([PYTHON_CFLAGS], [$PYTHON_CFLAGS])
+ AC_SUBST([PYTHON_LDFLAGS], [$PYTHON_LDFLAGS])
+ fi
+ else
+ AS_IF([test "$with_python" = "try"],
+ [AC_MSG_WARN([Could not find or use python distutils module: $python_result])],
+ [AC_MSG_ERROR([Could not find or use python distutils module: $python_result])]
+ )
+ fi
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ unset python_has_threads
+ unset python_has_distutils
+ unset python_result
+ else
+ AS_IF([test "$with_python" = "try"],
+ [AC_MSG_WARN([Could not find python, mod_python will not build, use --with-python to specify the location])],
+ [AC_MSG_ERROR([Could not find python, use --with-python to specify the location])]
+ )
+ fi
+else
+ AC_MSG_WARN([python support disabled, building mod_python will fail!])
+fi
+
+#
+# SNMP checks for mod_snmp
+#
+AC_PATH_PROG([NET_SNMP_CONFIG], [net-snmp-config], [no])
+if test "$NET_SNMP_CONFIG" != "no"; then
+ AC_MSG_CHECKING([for Net-SNMP libraries via net-snmp-config])
+ SNMP_LIBS="`$NET_SNMP_CONFIG --base-agent-libs`"
+else
+ # net-snmp-config not in path, fallback to sensible defaults
+ SNMP_LIBS="-lnetsnmpmibs -lnetsnmpagent -lnetsnmp"
+fi
+
+# fix linking error on Solaris patched Net-SNMP
+AS_CASE([$host], [*-solaris2*], [AC_CHECK_LIB([dladm], [dladm_open], [SNMP_LIBS="$SNMP_LIBS -ldladm"])])
+AC_SUBST(SNMP_LIBS)
+
+CHECK_ERLANG
+
+# Enable clang address sanitizer bit build
+AC_ARG_ENABLE(address_sanitizer,
+ [AC_HELP_STRING([--enable-address-sanitizer],[build with address sanitizer])],
+ [enable_address_sanitizer="$enable_address_sanitizer"],
+ [enable_address_sanitizer="no"])
+
+if test "${enable_address_sanitizer}" = "yes"; then
+ APR_ADDTO(CFLAGS, -fsanitize=address -fno-omit-frame-pointer -fstack-protector-strong)
+ APR_ADDTO(CXXFLAGS, -fsanitize=address -fno-omit-frame-pointer -fstack-protector-strong)
+ APR_ADDTO(LDFLAGS, -fsanitize=address)
+fi
+
+AC_ARG_ENABLE(pool_sanitizer,
+ [AC_HELP_STRING([--enable-pool-sanitizer],[build with sanitizer friendly pool behavior])],
+ [enable_pool_sanitizer="$enable_pool_sanitizer"],
+ [enable_pool_sanitizer="no"])
+
+if test "${enable_pool_sanitizer}" = "yes"; then
+ APR_ADDTO(CFLAGS, -DDESTROY_POOLS)
+ ac_configure_args="$ac_configure_args --enable-pool-debug=yes"
+fi
+
+# we never use this, and hard setting it will make cross compile work better
+ac_cv_file_dbd_apr_dbd_mysql_c=no
+
+AC_CONFIG_FILES([Makefile
+ build/Makefile
+ tests/unit/Makefile
+ src/Makefile
+ src/mod/Makefile
+ src/mod/applications/mod_audio_fork/Makefile
+ src/mod/applications/mod_aws_lex/Makefile
+ src/mod/applications/mod_aws_transcribe/Makefile
+ src/mod/applications/mod_azure_transcribe/Makefile
+ src/mod/applications/mod_deepgram_transcribe/Makefile
+ src/mod/applications/mod_google_tts/Makefile
+ src/mod/applications/mod_google_transcribe/Makefile
+ src/mod/applications/mod_ibm_transcribe/Makefile
+ src/mod/applications/mod_jambonz_transcribe/Makefile
+ src/mod/applications/mod_nuance_transcribe/Makefile
+ src/mod/applications/mod_nvidia_transcribe/Makefile
+ src/mod/applications/mod_soniox_transcribe/Makefile
+ src/mod/applications/mod_dialogflow/Makefile
+ src/mod/applications/mod_abstraction/Makefile
+ src/mod/applications/mod_avmd/Makefile
+ src/mod/applications/mod_bert/Makefile
+ src/mod/applications/mod_blacklist/Makefile
+ src/mod/applications/mod_callcenter/Makefile
+ src/mod/applications/mod_cidlookup/Makefile
+ src/mod/applications/mod_cluechoo/Makefile
+ src/mod/applications/mod_commands/Makefile
+ src/mod/applications/mod_conference/Makefile
+ src/mod/applications/mod_curl/Makefile
+ src/mod/applications/mod_cv/Makefile
+ src/mod/applications/mod_db/Makefile
+ src/mod/applications/mod_directory/Makefile
+ src/mod/applications/mod_distributor/Makefile
+ src/mod/applications/mod_dptools/Makefile
+ src/mod/applications/mod_easyroute/Makefile
+ src/mod/applications/mod_enum/Makefile
+ src/mod/applications/mod_esf/Makefile
+ src/mod/applications/mod_esl/Makefile
+ src/mod/applications/mod_expr/Makefile
+ src/mod/applications/mod_fifo/Makefile
+ src/mod/applications/mod_fsk/Makefile
+ src/mod/applications/mod_fsv/Makefile
+ src/mod/applications/mod_hash/Makefile
+ src/mod/applications/mod_hiredis/Makefile
+ src/mod/applications/mod_httapi/Makefile
+ src/mod/applications/mod_http_cache/Makefile
+ src/mod/applications/mod_ladspa/Makefile
+ src/mod/applications/mod_lcr/Makefile
+ src/mod/applications/mod_limit/Makefile
+ src/mod/applications/mod_memcache/Makefile
+ src/mod/applications/mod_mongo/Makefile
+ src/mod/applications/mod_mp4/Makefile
+ src/mod/applications/mod_mp4v2/Makefile
+ src/mod/applications/mod_nibblebill/Makefile
+ src/mod/applications/mod_oreka/Makefile
+ src/mod/applications/mod_osp/Makefile
+ src/mod/applications/mod_prefix/Makefile
+ src/mod/applications/mod_rad_auth/Makefile
+ src/mod/applications/mod_random/Makefile
+ src/mod/applications/mod_redis/Makefile
+ src/mod/applications/mod_rss/Makefile
+ src/mod/applications/mod_skel/Makefile
+ src/mod/applications/mod_signalwire/Makefile
+ src/mod/applications/mod_sms/Makefile
+ src/mod/applications/mod_sms_flowroute/Makefile
+ src/mod/applications/mod_snapshot/Makefile
+ src/mod/applications/mod_snom/Makefile
+ src/mod/applications/mod_sonar/Makefile
+ src/mod/applications/mod_soundtouch/Makefile
+ src/mod/applications/mod_spandsp/Makefile
+ src/mod/applications/mod_spy/Makefile
+ src/mod/applications/mod_stress/Makefile
+ src/mod/applications/mod_test/Makefile
+ src/mod/applications/mod_translate/Makefile
+ src/mod/applications/mod_valet_parking/Makefile
+ src/mod/applications/mod_vmd/Makefile
+ src/mod/applications/mod_voicemail/Makefile
+ src/mod/applications/mod_voicemail_ivr/Makefile
+ src/mod/asr_tts/mod_cepstral/Makefile
+ src/mod/asr_tts/mod_flite/Makefile
+ src/mod/asr_tts/mod_pocketsphinx/Makefile
+ src/mod/asr_tts/mod_tts_commandline/Makefile
+ src/mod/asr_tts/mod_unimrcp/Makefile
+ src/mod/codecs/mod_amr/Makefile
+ src/mod/codecs/mod_amrwb/Makefile
+ src/mod/codecs/mod_b64/Makefile
+ src/mod/codecs/mod_bv/Makefile
+ src/mod/codecs/mod_clearmode/Makefile
+ src/mod/codecs/mod_codec2/Makefile
+ src/mod/codecs/mod_com_g729/Makefile
+ src/mod/codecs/mod_dahdi_codec/Makefile
+ src/mod/codecs/mod_g723_1/Makefile
+ src/mod/codecs/mod_g729/Makefile
+ src/mod/codecs/mod_h26x/Makefile
+ src/mod/codecs/mod_ilbc/Makefile
+ src/mod/codecs/mod_isac/Makefile
+ src/mod/codecs/mod_mp4v/Makefile
+ src/mod/codecs/mod_opus/Makefile
+ src/mod/codecs/mod_openh264/Makefile
+ src/mod/codecs/mod_sangoma_codec/Makefile
+ src/mod/codecs/mod_silk/Makefile
+ src/mod/codecs/mod_siren/Makefile
+ src/mod/codecs/mod_skel_codec/Makefile
+ src/mod/codecs/mod_theora/Makefile
+ src/mod/databases/mod_mariadb/Makefile
+ src/mod/databases/mod_pgsql/Makefile
+ src/mod/dialplans/mod_dialplan_asterisk/Makefile
+ src/mod/dialplans/mod_dialplan_directory/Makefile
+ src/mod/dialplans/mod_dialplan_xml/Makefile
+ src/mod/directories/mod_ldap/Makefile
+ src/mod/endpoints/mod_alsa/Makefile
+ src/mod/endpoints/mod_dingaling/Makefile
+ src/mod/endpoints/mod_gsmopen/Makefile
+ src/mod/endpoints/mod_h323/Makefile
+ src/mod/endpoints/mod_khomp/Makefile
+ src/mod/endpoints/mod_loopback/Makefile
+ src/mod/endpoints/mod_opal/Makefile
+ src/mod/endpoints/mod_portaudio/Makefile
+ src/mod/endpoints/mod_reference/Makefile
+ src/mod/endpoints/mod_rtmp/Makefile
+ src/mod/endpoints/mod_skinny/Makefile
+ src/mod/endpoints/mod_sofia/Makefile
+ src/mod/endpoints/mod_unicall/Makefile
+ src/mod/endpoints/mod_rtc/Makefile
+ src/mod/endpoints/mod_verto/Makefile
+ src/mod/event_handlers/mod_amqp/Makefile
+ src/mod/event_handlers/mod_cdr_csv/Makefile
+ src/mod/event_handlers/mod_cdr_mongodb/Makefile
+ src/mod/event_handlers/mod_cdr_pg_csv/Makefile
+ src/mod/event_handlers/mod_cdr_sqlite/Makefile
+ src/mod/event_handlers/mod_erlang_event/Makefile
+ src/mod/event_handlers/mod_event_multicast/Makefile
+ src/mod/event_handlers/mod_event_socket/Makefile
+ src/mod/event_handlers/mod_event_test/Makefile
+ src/mod/event_handlers/mod_fail2ban/Makefile
+ src/mod/event_handlers/mod_format_cdr/Makefile
+ src/mod/event_handlers/mod_json_cdr/Makefile
+ src/mod/event_handlers/mod_kazoo/Makefile
+ src/mod/event_handlers/mod_radius_cdr/Makefile
+ src/mod/event_handlers/mod_odbc_cdr/Makefile
+ src/mod/event_handlers/mod_rayo/Makefile
+ src/mod/event_handlers/mod_smpp/Makefile
+ src/mod/event_handlers/mod_snmp/Makefile
+ src/mod/event_handlers/mod_event_zmq/Makefile
+ src/mod/formats/mod_imagick/Makefile
+ src/mod/formats/mod_local_stream/Makefile
+ src/mod/formats/mod_native_file/Makefile
+ src/mod/formats/mod_opusfile/Makefile
+ src/mod/formats/mod_png/Makefile
+ src/mod/formats/mod_shell_stream/Makefile
+ src/mod/formats/mod_shout/Makefile
+ src/mod/formats/mod_sndfile/Makefile
+ src/mod/formats/mod_ssml/Makefile
+ src/mod/formats/mod_tone_stream/Makefile
+ src/mod/formats/mod_vlc/Makefile
+ src/mod/formats/mod_portaudio_stream/Makefile
+ src/mod/languages/mod_java/Makefile
+ src/mod/languages/mod_lua/Makefile
+ src/mod/languages/mod_managed/Makefile
+ src/mod/languages/mod_perl/Makefile
+ src/mod/languages/mod_python/Makefile
+ src/mod/languages/mod_v8/Makefile
+ src/mod/languages/mod_yaml/Makefile
+ src/mod/languages/mod_basic/Makefile
+ src/mod/loggers/mod_console/Makefile
+ src/mod/loggers/mod_graylog2/Makefile
+ src/mod/loggers/mod_logfile/Makefile
+ src/mod/loggers/mod_syslog/Makefile
+ src/mod/loggers/mod_raven/Makefile
+ src/mod/say/mod_say_de/Makefile
+ src/mod/say/mod_say_en/Makefile
+ src/mod/say/mod_say_es/Makefile
+ src/mod/say/mod_say_es_ar/Makefile
+ src/mod/say/mod_say_fa/Makefile
+ src/mod/say/mod_say_fr/Makefile
+ src/mod/say/mod_say_he/Makefile
+ src/mod/say/mod_say_hr/Makefile
+ src/mod/say/mod_say_hu/Makefile
+ src/mod/say/mod_say_it/Makefile
+ src/mod/say/mod_say_ja/Makefile
+ src/mod/say/mod_say_nl/Makefile
+ src/mod/say/mod_say_pl/Makefile
+ src/mod/say/mod_say_pt/Makefile
+ src/mod/say/mod_say_ru/Makefile
+ src/mod/say/mod_say_sv/Makefile
+ src/mod/say/mod_say_th/Makefile
+ src/mod/say/mod_say_zh/Makefile
+ src/mod/timers/mod_posix_timer/Makefile
+ src/mod/timers/mod_timerfd/Makefile
+ src/mod/xml_int/mod_xml_cdr/Makefile
+ src/mod/xml_int/mod_xml_curl/Makefile
+ src/mod/xml_int/mod_xml_ldap/Makefile
+ src/mod/xml_int/mod_xml_radius/Makefile
+ src/mod/xml_int/mod_xml_rpc/Makefile
+ src/mod/xml_int/mod_xml_scgi/Makefile
+ src/mod/applications/mod_av/Makefile
+ src/mod/applications/mod_video_filter/Makefile
+ src/include/switch_am_config.h
+ build/getsounds.sh
+ build/getlib.sh
+ build/getg729.sh
+ build/freeswitch.pc
+ build/standalone_module/freeswitch.pc
+ build/modmake.rules
+ libs/esl/Makefile
+ libs/esl/perl/Makefile
+ libs/esl/php/Makefile
+ libs/xmlrpc-c/include/xmlrpc-c/config.h
+ libs/xmlrpc-c/xmlrpc_config.h
+ libs/xmlrpc-c/config.mk
+ libs/xmlrpc-c/srcdir.mk
+ libs/xmlrpc-c/stamp-h
+ scripts/gentls_cert])
+
+AM_CONDITIONAL(ISLINUX, [test `uname -s` = Linux])
+AM_CONDITIONAL(ISMAC, [test `uname -s` = Darwin])
+AM_CONDITIONAL(ISFREEBSD, [test `uname -s` = FreeBSD])
+AM_CONDITIONAL(IS64BITLINUX, [test `uname -m` = x86_64])
+
+AM_CONDITIONAL(HAVE_G723_1, [ test -d ${switch_srcdir}/libs/libg723_1 ])
+AM_CONDITIONAL(HAVE_G729, [ test -d ${switch_srcdir}/libs/libg729 ])
+
+#some vars to sub into the Makefile.am's
+#LIBS+=> core.log || error="yes";if test -n "$(VERBOSE)" -o "$$error" = "yes";then cat core.log;fi;if test "$$error" = "yes";then exit 1;fi
+LIBTOOL='$(SHELL) $(switch_builddir)/libtool'
+TOUCH_TARGET='if test -f "$@";then touch "$@";fi;'
+CONF_MODULES='$$(grep -v "\#" $(switch_builddir)/modules.conf | sed "s/|.*//" | sed -e "s|^.*/||" | sort | uniq )'
+CONF_DISABLED_MODULES='$$(grep "\#" $(switch_builddir)/modules.conf | grep -v "\#\#" | sed "s/|.*//" | sed -e "s|^.*/||" | sort | uniq )'
+OUR_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-all ; done )"; echo $$mods )'
+OUR_CLEAN_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-clean ; done )"; echo $$mods )'
+OUR_INSTALL_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-install ; done)"; echo $$mods )'
+OUR_UNINSTALL_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-uninstall ; done)"; echo $$mods )'
+OUR_TEST_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-print_tests ; done )"; echo $$mods )'
+OUR_CHECK_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-check ; done )"; echo $$mods )'
+OUR_DISABLED_MODS='$$(tmp_mods="$(CONF_DISABLED_MODULES)"; mods="$$(for i in $$tmp_mods ; do echo $$i-all ; done )"; echo $$mods )'
+OUR_DISABLED_CLEAN_MODS='$$(tmp_mods="$(CONF_DISABLED_MODULES)"; mods="$$(for i in $$tmp_mods ; do echo $$i-clean ; done )"; echo $$mods )'
+OUR_DISABLED_INSTALL_MODS='$$(tmp_mods="$(CONF_DISABLED_MODULES)"; mods="$$(for i in $$tmp_mods ; do echo $$i-install ; done)"; echo $$mods )'
+OUR_DISABLED_UNINSTALL_MODS='$$(tmp_mods="$(CONF_DISABLED_MODULES)"; mods="$$(for i in $$tmp_mods ; do echo $$i-uninstall ; done)"; echo $$mods )'
+
+#AM_MAKEFLAGS='"OUR_MODULES=$(OUR_MODS)" "OUR_CLEAN_MODULES=$(OUR_CLEAN_MODS)" "OUR_INSTALL_MODULES=$(OUR_INSTALL_MODS)" "OUR_UNINSTALL_MODULES=$(OUR_UNINSTALL_MODS)" "OUR_DISABLED_MODULES=$(OUR_DISABLED_MODS)" "OUR_DISABLED_CLEAN_MODULES=$(OUR_DISABLED_CLEAN_MODS)" "OUR_DISABLED_INSTALL_MODULES=$(OUR_DISABLED_INSTALL_MODS)" "OUR_DISABLED_UNINSTALL_MODULES=$(OUR_DISABLED_UNINSTALL_MODS)" `test -n "$(VERBOSE)" || echo -s`'
+#AM_MAKEFLAGS='`test -n "$(VERBOSE)" || echo -s`'
+AC_SUBST(LIBTOOL)
+AC_SUBST(TOUCH_TARGET)
+AC_SUBST(CONF_DISABLED_MODULES)
+AC_SUBST(CONF_MODULES)
+
+AC_SUBST(OUR_MODS)
+AC_SUBST(OUR_CLEAN_MODS)
+AC_SUBST(OUR_TEST_MODS)
+AC_SUBST(OUR_CHECK_MODS)
+AC_SUBST(OUR_INSTALL_MODS)
+AC_SUBST(OUR_UNINSTALL_MODS)
+AC_SUBST(OUR_DISABLED_MODS)
+AC_SUBST(OUR_DISABLED_CLEAN_MODS)
+AC_SUBST(OUR_DISABLED_INSTALL_MODS)
+AC_SUBST(OUR_DISABLED_UNINSTALL_MODS)
+AC_SUBST(AM_MAKEFLAGS)
+
+ac_configure_args="$ac_configure_args --with-modinstdir=${modulesdir} CONFIGURE_CFLAGS='$CFLAGS $CPPFLAGS' CONFIGURE_CXXFLAGS='$CXXFLAGS $CPPFLAGS' CONFIGURE_LDFLAGS='$LDFLAGS' "
+
+# --prefix='$prefix' --exec_prefix='$exec_prefix' --libdir='$libdir' --libexecdir='$libexecdir' --bindir='$bindir' --sbindir='$sbindir' \
+# --localstatedir='$localstatedir' --datadir='$datadir'"
+
+# Run configure in all the subdirs
+AC_CONFIG_SUBDIRS([libs/srtp])
+if test "$use_system_apr" != "yes"; then
+ AC_CONFIG_SUBDIRS([libs/apr])
+fi
+if test "$use_system_aprutil" != "yes"; then
+ AC_CONFIG_SUBDIRS([libs/apr-util])
+fi
+AC_CONFIG_SUBDIRS([libs/iksemel])
+AC_CONFIG_SUBDIRS([libs/libdingaling])
+AC_CONFIG_SUBDIRS([libs/freetdm])
+AC_CONFIG_SUBDIRS([libs/unimrcp])
+if test "x${enable_zrtp}" = "xyes"; then
+ AC_CONFIG_SUBDIRS([libs/libzrtp])
+fi
+
+case $host in
+ *-openbsd*|*-netbsd*)
+ # libtool won't link static libs against shared ones on NetBSD/OpenBSD unless we tell it not to be stupid
+ AC_CONFIG_COMMANDS([hacklibtool], [cp libtool libtool.orig && sed -e "s/deplibs_check_method=.*/deplibs_check_method=pass_all/g" libtool.orig > libtool])
+ ;;
+esac
+
+
+AC_OUTPUT
+
+##
+## Configuration summary
+##
+
+echo
+echo "-------------------------- FreeSWITCH configuration --------------------------"
+echo ""
+echo " Locations:"
+echo ""
+echo " prefix: ${prefix}"
+echo " exec_prefix: ${exec_prefix}"
+echo " bindir: ${bindir}"
+echo " confdir: ${confdir}"
+echo " libdir: ${libdir}"
+echo " datadir: ${datadir}"
+echo " localstatedir: ${localstatedir}"
+echo " includedir: ${includedir}"
+echo ""
+echo " certsdir: ${certsdir}"
+echo " dbdir: ${dbdir}"
+echo " grammardir: ${grammardir}"
+echo " htdocsdir: ${htdocsdir}"
+echo " fontsdir: ${fontsdir}"
+echo " logfiledir: ${logfiledir}"
+echo " modulesdir: ${modulesdir}"
+echo " pkgconfigdir: ${pkgconfigdir}"
+echo " recordingsdir: ${recordingsdir}"
+echo " imagesdir: ${imagesdir}"
+echo " runtimedir: ${runtimedir}"
+echo " scriptdir: ${scriptdir}"
+echo " soundsdir: ${soundsdir}"
+echo " storagedir: ${storagedir}"
+echo " cachedir: ${cachedir}"
+echo ""
+echo "------------------------------------------------------------------------------"
diff --git a/packer/jambonz-mini/gcp/files/configure.ac.grpc.patch b/packer/jambonz-mini/gcp/files/configure.ac.grpc.patch
new file mode 100644
index 0000000..f030a1b
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/configure.ac.grpc.patch
@@ -0,0 +1,33 @@
+--- configure.ac 2019-10-22 22:47:40.566582350 +0000
++++ configure.ac.new 2019-10-23 14:56:29.469206772 +0000
+@@ -1563,6 +1563,20 @@
+ AM_CONDITIONAL([HAVE_LWS],[false])
+ fi
+
+++dnl DH: Added for including google protobuf libs
++AC_ARG_WITH(grpc,
++ [AS_HELP_STRING([--with-grpc],
++ [enable support for google rpc (libgrpc++ and libgrpc)])],
++ [with_grpc="$withval"],
++ [with_grpc="no"])
++if test "$with_grpc" = "yes"; then
++ PKG_CHECK_MODULES([GRPC], [grpc++ grpc], [
++ AM_CONDITIONAL([HAVE_GRPC],[true])], [
++ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_GRPC],[false])])
++else
++ AM_CONDITIONAL([HAVE_GRPC],[false])
++fi
++
+ dnl ---------------------------------------------------------------------------
+ dnl - OpenLDAP SDK
+ dnl ---------------------------------------------------------------------------
+@@ -1858,6 +1872,9 @@
+ src/Makefile
+ src/mod/Makefile
+ src/mod/applications/mod_audio_fork/Makefile
++ src/mod/applications/mod_google_tts/Makefile
++ src/mod/applications/mod_google_transcribe/Makefile
++ src/mod/applications/mod_dialogflow/Makefile
+ src/mod/applications/mod_abstraction/Makefile
+ src/mod/applications/mod_avmd/Makefile
+ src/mod/applications/mod_bert/Makefile
diff --git a/packer/jambonz-mini/gcp/files/configure.ac.patch b/packer/jambonz-mini/gcp/files/configure.ac.patch
new file mode 100644
index 0000000..f1baabc
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/configure.ac.patch
@@ -0,0 +1,40 @@
+--- configure.ac 2019-09-30 19:01:33.308021065 +0000
++++ configure.ac.new 2019-09-30 23:00:53.730843843 +0000
+@@ -13,7 +13,7 @@
+ AC_CONFIG_FILES([src/include/switch_version.h.in:src/include/switch_version.h.template])
+
+ AC_CONFIG_AUX_DIR(build/config)
+-AM_INIT_AUTOMAKE
++AM_INIT_AUTOMAKE([subdir-objects])
+ m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
+ AC_CONFIG_SRCDIR([src/switch.c])
+ AC_CONFIG_HEADER([src/include/switch_private.h])
+@@ -1549,6 +1549,20 @@
+ AC_MSG_ERROR([You need to either install libedit-dev (>= 2.11) or configure with --disable-core-libedit-support])
+ ])])])
+
++dnl DH: Added for including libwebsockets
++AC_ARG_WITH(lws,
++ [AS_HELP_STRING([--with-lws],
++ [enable support for libwebsockets])],
++ [with_lws="$withval"],
++ [with_lws="no"])
++if test "$with_lws" = "yes"; then
++ PKG_CHECK_MODULES([LWS], [libwebsockets], [
++ AM_CONDITIONAL([HAVE_LWS],[true])], [
++ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_LWS],[false])])
++else
++ AM_CONDITIONAL([HAVE_LWS],[false])
++fi
++
+ dnl ---------------------------------------------------------------------------
+ dnl - OpenLDAP SDK
+ dnl ---------------------------------------------------------------------------
+@@ -1843,6 +1857,7 @@
+ tests/unit/Makefile
+ src/Makefile
+ src/mod/Makefile
++ src/mod/applications/mod_audio_fork/Makefile
+ src/mod/applications/mod_abstraction/Makefile
+ src/mod/applications/mod_avmd/Makefile
+ src/mod/applications/mod_bert/Makefile
diff --git a/packer/jambonz-mini/gcp/files/drachtio-5070.conf.xml b/packer/jambonz-mini/gcp/files/drachtio-5070.conf.xml
new file mode 100644
index 0000000..f7ac6ae
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/drachtio-5070.conf.xml
@@ -0,0 +1,31 @@
+
+
+
+ 0.0.0.0
+
+
+
+
+
+ 8192
+
+
+
+ false
+
+
+
+
+ /var/log/drachtio/drachtio-5070.log
+ /var/log/drachtio/archive
+ 100
+ 10000
+ true
+
+
+ 3
+
+ info
+
+
+
diff --git a/packer/jambonz-mini/gcp/files/drachtio-5070.gcp.service b/packer/jambonz-mini/gcp/files/drachtio-5070.gcp.service
new file mode 100644
index 0000000..63d02da
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/drachtio-5070.gcp.service
@@ -0,0 +1,29 @@
+
+[Unit]
+Description=drachtio
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+ExecStartPre=/bin/sh -c 'systemctl set-environment LOCAL_IP=`curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip`'
+ExecStart=/usr/local/bin/drachtio --daemon -f /etc/drachtio-5070.conf.xml --contact sip:${LOCAL_IP}:5070;transport=udp,tcp --address 0.0.0.0 --port 9023
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/gcp/files/drachtio-5070.service b/packer/jambonz-mini/gcp/files/drachtio-5070.service
new file mode 100644
index 0000000..c7d4530
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/drachtio-5070.service
@@ -0,0 +1,29 @@
+
+[Unit]
+Description=drachtio
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+ExecStartPre=/bin/sh -c 'systemctl set-environment LOCAL_IP=`curl -s http://169.254.169.254/latest/meta-data/local-ipv4`'
+ExecStart=/usr/local/bin/drachtio --daemon -f /etc/drachtio-5070.conf.xml --contact sip:${LOCAL_IP}:5070;transport=udp,tcp --address 0.0.0.0 --port 9023
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/gcp/files/drachtio-fail2ban.conf b/packer/jambonz-mini/gcp/files/drachtio-fail2ban.conf
new file mode 100644
index 0000000..bce54d4
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/drachtio-fail2ban.conf
@@ -0,0 +1,18 @@
+# Fail2Ban filter for drachtio spammer detection
+#
+
+[INCLUDES]
+
+# Read common prefixes. If any customizations available -- read them from
+# common.local
+before = common.conf
+
+[Definition]
+
+_daemon = drachtio
+
+__pid_re = (?:\[\d+\])
+
+failregex = detected potential spammer from <HOST>:\d+
+
+ignoreregex =
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/drachtio.conf.xml b/packer/jambonz-mini/gcp/files/drachtio.conf.xml
new file mode 100644
index 0000000..52a0c1f
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/drachtio.conf.xml
@@ -0,0 +1,35 @@
+
+
+
+ 127.0.0.1
+
+
+ http://127.0.0.1:4000
+
+
+
+
+
+
+ 8192
+
+
+
+ false
+
+
+
+
+ /var/log/drachtio/drachtio.log
+ /var/log/drachtio/archive
+ 100
+ 10000
+ false
+
+
+ 3
+
+ info
+
+
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/drachtio.gcp.service b/packer/jambonz-mini/gcp/files/drachtio.gcp.service
new file mode 100644
index 0000000..555547d
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/drachtio.gcp.service
@@ -0,0 +1,32 @@
+
+[Unit]
+Description=drachtio
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+ExecStartPre=/bin/sh -c 'systemctl set-environment LOCAL_IP=`curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip`'
+ExecStartPre=/bin/sh -c 'systemctl set-environment PUBLIC_IP=`curl -s -H "Metadata-Flavor: Google" http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip`'
+ExecStart=/usr/local/bin/drachtio --daemon --contact sip:${LOCAL_IP};transport=udp --external-ip ${PUBLIC_IP} \
+--contact sip:${LOCAL_IP};transport=tcp \
+ --address 0.0.0.0 --port 9022 --homer 127.0.0.1:9060 --homer-id 10
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/gcp/files/drachtio.service b/packer/jambonz-mini/gcp/files/drachtio.service
new file mode 100644
index 0000000..40a9844
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/drachtio.service
@@ -0,0 +1,32 @@
+
+[Unit]
+Description=drachtio
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+ExecStartPre=/bin/sh -c 'systemctl set-environment LOCAL_IP=`curl -s http://169.254.169.254/latest/meta-data/local-ipv4`'
+ExecStartPre=/bin/sh -c 'systemctl set-environment PUBLIC_IP=`curl -s http://169.254.169.254/latest/meta-data/public-ipv4`'
+ExecStart=/usr/local/bin/drachtio --daemon --contact sip:${LOCAL_IP};transport=udp --external-ip ${PUBLIC_IP} \
+--contact sip:${LOCAL_IP};transport=tcp \
+ --address 0.0.0.0 --port 9022 --homer 127.0.0.1:9060 --homer-id 10
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/gcp/files/ecosystem.config.js b/packer/jambonz-mini/gcp/files/ecosystem.config.js
new file mode 100644
index 0000000..275f5bd
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/ecosystem.config.js
@@ -0,0 +1,297 @@
+module.exports = {
+ apps : [
+ {
+ name: 'jambonz-webapp',
+ script: 'npm',
+ cwd: '/home/admin/apps/jambonz-webapp',
+ args: 'run serve'
+ },
+ {
+ name: 'jambonz-smpp-esme',
+ cwd: '/home/admin/apps/jambonz-smpp-esme',
+ script: 'app.js',
+ out_file: '/home/admin/.pm2/logs/jambonz-smpp-esme.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-smpp-esme.log',
+ combine_logs: true,
+ instance_var: 'INSTANCE_ID',
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ max_memory_restart: '2G',
+ env: {
+ NODE_ENV: 'production',
+ HTTP_PORT: 3020,
+ AVOID_UDH: true,
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_LOGLEVEL: 'info'
+ }
+ },
+ {
+ name: 'jambonz-api-server',
+ cwd: '/home/admin/apps/jambonz-api-server',
+ script: 'app.js',
+ out_file: '/home/admin/.pm2/logs/jambonz-api-server.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-api-server.log',
+ combine_logs: true,
+ instance_var: 'INSTANCE_ID',
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ max_memory_restart: '1G',
+ env: {
+ NODE_ENV: 'production',
+ AUTHENTICATION_KEY: 'JWT-SECRET-GOES_HERE',
+ JWT_SECRET: 'JWT-SECRET-GOES_HERE',
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_LOGLEVEL: 'info',
+ JAMBONE_API_VERSION: 'v1',
+ JAMBONES_TIME_SERIES_HOST: '127.0.0.1',
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1,
+ HTTP_PORT: 3002,
+ JAEGER_BASE_URL: 'http://127.0.0.1:16686',
+ HOMER_BASE_URL: 'http://127.0.0.1:9080',
+ HOMER_USERNAME: 'admin',
+ HOMER_PASSWORD: 'sipcapture'
+ }
+ },
+ {
+ name: 'sbc-call-router',
+ cwd: '/home/admin/apps/sbc-call-router',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ combine_logs: true,
+ out_file: '/home/admin/.pm2/logs/jambonz-sbc-call-router.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-sbc-call-router.log',
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ max_memory_restart: '1G',
+ env: {
+ NODE_ENV: 'production',
+ HTTP_PORT: 4000,
+ JAMBONES_INBOUND_ROUTE: '127.0.0.1:4002',
+ JAMBONES_OUTBOUND_ROUTE: '127.0.0.1:4003',
+ JAMBONZ_TAGGED_INBOUND: 1,
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1,
+ JAMBONES_NETWORK_CIDR: 'PRIVATE_IP/32'
+ }
+ },
+ {
+ name: 'sbc-sip-sidecar',
+ cwd: '/home/admin/apps/sbc-sip-sidecar',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ out_file: '/home/admin/.pm2/logs/jambonz-sbc-sip-sidecar.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-sbc-sip-sidecar.log',
+ combine_logs: true,
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ max_memory_restart: '1G',
+ env: {
+ NODE_ENV: 'production',
+ JAMBONES_LOGLEVEL: 'info',
+ RTPENGINE_PING_INTERVAL: 30000,
+ DRACHTIO_HOST: '127.0.0.1',
+ DRACHTIO_PORT: 9022,
+ DRACHTIO_SECRET: 'cymru',
+ JAMBONES_NETWORK_CIDR: 'PRIVATE_IP/32',
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_TIME_SERIES_HOST: '127.0.0.1',
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1
+ }
+ },
+ {
+ name: 'sbc-outbound',
+ cwd: '/home/admin/apps/sbc-outbound',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ out_file: '/home/admin/.pm2/logs/jambonz-sbc-outbound.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-sbc-outbound.log',
+ combine_logs: true,
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ env: {
+ NODE_ENV: 'production',
+ JAMBONES_LOGLEVEL: 'info',
+ JAMBONES_NETWORK_CIDR: 'PRIVATE_IP/32',
+ MIN_CALL_LIMIT: 9999,
+ RTPENGINE_PING_INTERVAL: 30000,
+ DRACHTIO_HOST: '127.0.0.1',
+ DRACHTIO_PORT: 9022,
+ DRACHTIO_SECRET: 'cymru',
+ JAMBONES_RTPENGINE_UDP_PORT: 6000,
+ JAMBONES_RTPENGINES: '127.0.0.1:22222',
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_TIME_SERIES_HOST: '127.0.0.1',
+ JAMBONES_TRACK_ACCOUNT_CALLS: 0,
+ JAMBONES_TRACK_SP_CALLS: 0,
+ JAMBONES_TRACK_APP_CALLS: 0,
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1,
+ MS_TEAMS_FQDN: ''
+ }
+ },
+ {
+ name: 'sbc-inbound',
+ cwd: '/home/admin/apps/sbc-inbound',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ out_file: '/home/admin/.pm2/logs/jambonz-sbc-inbound.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-sbc-inbound.log',
+ combine_logs: true,
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ env: {
+ NODE_ENV: 'production',
+ JAMBONES_NETWORK_CIDR: 'PRIVATE_IP/32',
+ JAMBONES_LOGLEVEL: 'info',
+ DRACHTIO_HOST: '127.0.0.1',
+ DRACHTIO_PORT: 9022,
+ DRACHTIO_SECRET: 'cymru',
+ JAMBONES_RTPENGINE_UDP_PORT: 7000,
+ JAMBONES_RTPENGINES: '127.0.0.1:22222',
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_TIME_SERIES_HOST: '127.0.0.1',
+ JAMBONES_TRACK_ACCOUNT_CALLS: 0,
+ JAMBONES_TRACK_SP_CALLS: 0,
+ JAMBONES_TRACK_APP_CALLS: 0,
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1,
+ MS_TEAMS_SIP_PROXY_IPS: '52.114.148.0, 52.114.132.46, 52.114.75.24, 52.114.76.76, 52.114.7.24, 52.114.14.70'
+ }
+ },
+ {
+ name: 'sbc-rtpengine-sidecar',
+ cwd: '/home/admin/apps/sbc-rtpengine-sidecar',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ out_file: '/home/admin/.pm2/logs/jambonz-sbc-rtpengine-sidecar.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-sbc-rtpengine-sidecar.log',
+ combine_logs: true,
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ max_memory_restart: '1G',
+ env: {
+ NODE_ENV: 'production',
+ LOGLEVEL: 'info',
+ DTMF_ONLY: true,
+ RTPENGINE_DTMF_LOG_PORT: 22223,
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1
+ }
+ },
+ {
+ name: 'jambonz-feature-server',
+ cwd: '/home/admin/apps/jambonz-feature-server',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ out_file: '/home/admin/.pm2/logs/jambonz-feature-server.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-feature-server.log',
+ combine_logs: true,
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ env: {
+ NODE_ENV: 'production',
+ AUTHENTICATION_KEY: 'JWT-SECRET-GOES_HERE',
+ JWT_SECRET: 'JWT-SECRET-GOES_HERE',
+ JAMBONES_GATHER_EARLY_HINTS_MATCH: 1,
+ JAMBONES_OTEL_ENABLED: 1,
+ OTEL_EXPORTER_JAEGER_ENDPOINT: 'http://localhost:14268/api/traces',
+ OTEL_EXPORTER_OTLP_METRICS_INSECURE: 1,
+ OTEL_EXPORTER_JAEGER_GRPC_INSECURE: 1,
+ OTEL_TRACES_SAMPLER: 'parentbased_traceidratio',
+ OTEL_TRACES_SAMPLER_ARG: 1.0,
+ VMD_HINTS_FILE: '/home/admin/apps/jambonz-feature-server/data/example-voicemail-greetings.json',
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1,
+ AWS_REGION: 'AWS_REGION_NAME',
+ JAMBONES_NETWORK_CIDR: 'PRIVATE_IP/32',
+ JAMBONES_API_BASE_URL: '--JAMBONES_API_BASE_URL--',
+ JAMBONES_GATHER_EARLY_HINTS_MATCH: 1,
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_LOGLEVEL: 'info',
+ JAMBONES_TIME_SERIES_HOST: '127.0.0.1',
+ HTTP_PORT: 3000,
+ DRACHTIO_HOST: '127.0.0.1',
+ DRACHTIO_PORT: 9023,
+ DRACHTIO_SECRET: 'cymru',
+ JAMBONES_SBCS: 'PRIVATE_IP',
+ JAMBONES_FREESWITCH: '127.0.0.1:8021:JambonzR0ck$',
+ SMPP_URL: 'http://PRIVATE_IP:3020'
+ }
+ }
+]
+};
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/event_socket.conf.xml b/packer/jambonz-mini/gcp/files/event_socket.conf.xml
new file mode 100644
index 0000000..075dd8e
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/event_socket.conf.xml
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/freeswitch.service b/packer/jambonz-mini/gcp/files/freeswitch.service
new file mode 100644
index 0000000..a40b612
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/freeswitch.service
@@ -0,0 +1,35 @@
+
+[Unit]
+Description=freeswitch
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+PIDFile=/usr/local/freeswitch/run/freeswitch.pid
+EnvironmentFile=-/etc/default/freeswitch
+Environment="MOD_AUDIO_FORK_SUBPROTOCOL_NAME=audio.jambonz.org"
+Environment="MOD_AUDIO_FORK_SERVICE_THREADS=1"
+Environment="MOD_AUDIO_FORK_BUFFER_SECS=3"
+Environment="LD_LIBRARY_PATH=/usr/local/lib"
+Environment="GOOGLE_APPLICATION_CREDENTIALS=/home/admin/credentials/gcp.json"
+ExecStart=/usr/local/freeswitch/bin/freeswitch -nc -nonat
+TimeoutSec=45s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/freeswitch_log_rotation b/packer/jambonz-mini/gcp/files/freeswitch_log_rotation
new file mode 100644
index 0000000..6028983
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/freeswitch_log_rotation
@@ -0,0 +1,26 @@
+#!/bin/bash
+# logrotate replacement script
+# source : http://wiki.fusionpbx.com/index.php?title=RotateFSLogs
+# put in /etc/cron.daily
+# don't forget to make it executable
+# you might consider changing /usr/local/freeswitch/conf/autoload_configs/logfile.conf.xml
+#
+
+#number of days of logs to keep
+NUMBERDAYS=5
+FSPATH=/usr/local/freeswitch/
+
+$FSPATH/bin/fs_cli -x "fsctl send_sighup" |grep '+OK' >/tmp/rotateFSlogs
+if [ $? -eq 0 ]; then
+ #-cmin 2 could bite us (leave some files uncompressed, eg 11M auto-rotate). Maybe -1440 is better?
+ find $FSPATH/log/ -name "freeswitch.log.*" -cmin -2 -exec gzip {} \;
+ find $FSPATH/log/ -name "freeswitch.log.*.gz" -mtime +$NUMBERDAYS -exec /bin/rm {} \;
+    chown www-data:www-data $FSPATH/log/freeswitch.log
+ chmod 660 $FSPATH/log/freeswitch.log
+ logger FreeSWITCH Logs rotated
+ /bin/rm /tmp/rotateFSlogs
+else
+ logger FreeSWITCH Log Rotation Script FAILED
+    mail -s "$HOST FS Log Rotate Error" root < /tmp/rotateFSlogs
+ /bin/rm /tmp/rotateFSlogs
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/grafana-dashboard-default.yaml b/packer/jambonz-mini/gcp/files/grafana-dashboard-default.yaml
new file mode 100644
index 0000000..34d3347
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/grafana-dashboard-default.yaml
@@ -0,0 +1,8 @@
+apiVersion: 1
+
+providers:
+ - name: Default
+ type: file
+ folder: 'jambonz'
+ options:
+ path: /var/lib/grafana/dashboards
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/grafana-dashboard-heplify.json b/packer/jambonz-mini/gcp/files/grafana-dashboard-heplify.json
new file mode 100644
index 0000000..f3a003a
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/grafana-dashboard-heplify.json
@@ -0,0 +1,1097 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": 2,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_kpi_rrd",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "*"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "RRD",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 8,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "0"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_rtcp_packets_lost",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "gauge"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "RTCP Packet Loss",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 5
+ },
+ "hiddenSeries": false,
+ "id": 5,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_kpi_srd",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "*"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "SRD",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 7
+ },
+ "hiddenSeries": false,
+ "id": 9,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_rtcp_jitter",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "gauge"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "RTCP Jitter",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "type"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_packets_total",
+ "orderByTime": "ASC",
+ "policy": "60s",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "counter"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "HEPlify Packets Total",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 14
+ },
+ "hiddenSeries": false,
+ "id": 10,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_rtcp_dlsr",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "gauge"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "RTCP DLSR",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 6,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "type"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_packets_size",
+ "orderByTime": "ASC",
+ "policy": "60s",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "gauge"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "HEPlify Packets Total",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 180
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 22
+ },
+ "id": 12,
+ "options": {
+ "displayMode": "gradient",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "mean"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showUnfilled": true
+ },
+ "pluginVersion": "7.3.1",
+ "targets": [
+ {
+ "alias": "$tag_method -> $tag_response",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "method"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "response"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_method_response",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "counter"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Methods and Responses",
+ "type": "bargauge"
+ }
+ ],
+ "refresh": "5s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "HEPlify Metrics",
+ "uid": "HO0OhLtGk",
+ "version": 1
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/grafana-dashboard-jambonz.json b/packer/jambonz-mini/gcp/files/grafana-dashboard-jambonz.json
new file mode 100644
index 0000000..5f76694
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/grafana-dashboard-jambonz.json
@@ -0,0 +1,2683 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 0,
+ "y": 0
+ },
+ "id": 9,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.5.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_sip_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT sum(\"last_value\") FROM (SELECT last(\"value\") AS \"last_value\" FROM \"sbc_sip_calls_count\" WHERE $timeFilter GROUP BY time($__interval),\"host\", \"instance_id\" fill(null)) GROUP BY time($__interval) fill(null)",
+ "queryType": "randomWalk",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Current Calls",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 4,
+ "y": 0
+ },
+ "id": 10,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.5.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_media_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT sum(\"last_value\") FROM (SELECT last(\"value\") AS \"last_value\" FROM \"sbc_media_calls_count\" WHERE $timeFilter GROUP BY time($__interval), \"host\" fill(null)) GROUP BY time($__interval) fill(null)",
+ "queryType": "randomWalk",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Current Media Streams",
+ "type": "stat"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 18,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "9.5.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_sip_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "max"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Total Calls",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:309",
+ "decimals": 0,
+ "format": "short",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:310",
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "alias": "Active media sessions (SBCs)",
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 13,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "9.5.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "linear"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_media_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Total Media Streams",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:256",
+ "decimals": 0,
+ "format": "short",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:257",
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 0,
+ "y": 3
+ },
+ "id": 39,
+ "options": {
+ "displayLabels": [],
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "sipStatus"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_terminations",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Inbound Response Codes",
+ "type": "piechart"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 5
+ },
+ "hiddenSeries": false,
+ "id": 14,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "9.5.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_sip_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "max"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "direction",
+ "operator": "=",
+ "value": "inbound"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Total Inbound Calls",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:362",
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:363",
+ "format": "short",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 5
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "9.5.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "fs_sip_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "max"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Feature Server Calls",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:468",
+ "decimals": 0,
+ "format": "short",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:469",
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 10
+ },
+ "id": 20,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "average",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "app_hook_response_time",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "mean"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "90th percentile",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "app_hook_response_time",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "90_percentile"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Webhook Response Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 10
+ },
+ "id": 43,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "app_rtpengine_response_time",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "mean"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Rtpengine command response time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 10
+ },
+ "id": 41,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "app_mysql_response_time",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "mean"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "MySQL Response Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 15
+ },
+ "id": 12,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_vendor",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "vendor"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tts_response_time",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "mean"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "TTS Response Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 15
+ },
+ "id": 7,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "total requests",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "vendor"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "0"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tts_cache_requests",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "count"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "served from cache",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "0"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tts_cache_requests",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"value\") FROM \"tts_cache_requests\" WHERE (\"found\" = 'yes') AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": false,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "count"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "found",
+ "operator": "=",
+ "value": "yes"
+ }
+ ]
+ }
+ ],
+ "title": "TTS Cache Hits",
+ "type": "timeseries"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "description": "",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 15
+ },
+ "hiddenSeries": false,
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "9.5.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "fs_media_channels_in_use",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Freeswitch Channels",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:1035",
+ "decimals": 0,
+ "format": "none",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:1036",
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 20
+ },
+ "id": 5,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_invites",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "count"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "SBC Invites",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 20
+ },
+ "id": 19,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_sip_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "max"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "direction",
+ "operator": "=",
+ "value": "outbound"
+ }
+ ]
+ }
+ ],
+ "title": "Total Outbound Calls",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 20
+ },
+ "id": 6,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_users_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "count"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Active registrations",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "max": 100,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 25
+ },
+ "id": 22,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "mem",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "available_percent"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "role",
+ "operator": "=",
+ "value": "mini"
+ }
+ ]
+ }
+ ],
+ "title": "% free memory",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 1,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decgbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 25
+ },
+ "id": 31,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "exe"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "procstat",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "memory_usage"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "role",
+ "operator": "=",
+ "value": "mini"
+ }
+ ]
+ }
+ ],
+ "title": "Memory By Process",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 1,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 33
+ },
+ "id": 23,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "system",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "load1"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "role",
+ "operator": "=",
+ "value": "mini"
+ }
+ ]
+ }
+ ],
+ "title": "Load avg",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "max": 100,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 33
+ },
+ "id": 37,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "exe"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "procstat",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "cpu_usage"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "role",
+ "operator": "=",
+ "value": "mini"
+ }
+ ]
+ }
+ ],
+ "title": "CPU Usage By Process",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": "5s",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-12h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "Jambonz Metrics",
+ "uid": "oAM51epMz",
+ "version": 1,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/grafana-dashboard-servers.json b/packer/jambonz-mini/gcp/files/grafana-dashboard-servers.json
new file mode 100644
index 0000000..e48d48f
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/grafana-dashboard-servers.json
@@ -0,0 +1,5121 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "InfluxDB dashboards for telegraf metrics",
+ "editable": true,
+ "gnetId": 5955,
+ "graphTooltip": 1,
+ "id": 4,
+ "iteration": 1604669735342,
+ "links": [],
+ "panels": [
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 65058,
+ "panels": [],
+ "title": "Quick overview",
+ "type": "row"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "s",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 0,
+ "y": 1
+ },
+ "id": 65078,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"uptime_format\") AS \"value\" FROM \"system\" WHERE \"host\" =~ /$server$/ AND $timeFilter GROUP BY time($interval)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "",
+ "title": "Uptime",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 4,
+ "y": 1
+ },
+ "id": 65079,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"used_percent\") FROM \"disk\" WHERE (\"host\" =~ /^$server$/ AND \"path\" = '/') AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "70,80,90",
+ "title": "Root FS Used",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 6,
+ "y": 1
+ },
+ "id": 65080,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"load5\") FROM \"system\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "3,7,10",
+ "title": "LA (Medium)",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 8,
+ "y": 1
+ },
+ "id": 65081,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"n_cpus\") AS \"mean_n_cpus\" FROM \"system\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null) ",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "",
+ "title": "CPUs",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 10,
+ "y": 1
+ },
+ "id": 65082,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"usage_idle\") * -1 + 100 FROM \"cpu\" WHERE (\"host\" =~ /^$server$/ AND \"cpu\" = 'cpu-total') AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "70,80,90",
+ "title": "CPU usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 12,
+ "y": 1
+ },
+ "id": 65083,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"used_percent\") FROM \"mem\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "70,80,90",
+ "title": "RAM usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": 2,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 14,
+ "y": 1
+ },
+ "id": 65084,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "\nSELECT mean(\"usage_iowait\") FROM \"cpu\" WHERE (\"host\" =~ /^$server$/ AND \"cpu\" = 'cpu-total') AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "70,80,90",
+ "title": "IOWait",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": 0,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 16,
+ "y": 1
+ },
+ "id": 65085,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "\nSELECT last(\"total\") FROM \"processes\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "1,5,10",
+ "title": "Processes",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": 0,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 18,
+ "y": 1
+ },
+ "id": 65086,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "\nSELECT last(\"total_threads\") FROM \"processes\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "1,5,10",
+ "title": "Threads",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 20,
+ "y": 1
+ },
+ "id": 65087,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"used_percent\") FROM \"swap\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "50,70,90",
+ "title": "Swap Usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 22,
+ "y": 1
+ },
+ "id": 65088,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"n_users\") FROM \"system\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "20,50",
+ "title": "Users",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 4
+ },
+ "id": 65060,
+ "panels": [],
+ "title": "SYSTEM - CPU, Memory, Disk",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 5
+ },
+ "hiddenSeries": false,
+ "id": 12054,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/mem_total/",
+ "color": "#BF1B00",
+ "fill": 0,
+ "linewidth": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "mem_inactive",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(total) as total, mean(used) as used, mean(cached) as cached, mean(free) as free, mean(buffered) as buffered FROM \"mem\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY asc",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Memory usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 15
+ },
+ "hiddenSeries": false,
+ "id": 65092,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "hide": false,
+ "measurement": "cpu_percentageBusy",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(usage_user) as \"user\", mean(usage_system) as \"system\", mean(usage_softirq) as \"softirq\", mean(usage_steal) as \"steal\", mean(usage_nice) as \"nice\", mean(usage_irq) as \"irq\", mean(usage_iowait) as \"iowait\", mean(usage_guest) as \"guest\", mean(usage_guest_nice) as \"guest_nice\" FROM \"cpu\" WHERE \"host\" =~ /$server$/ and cpu = 'cpu-total' AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "logBase": 1,
+ "max": 100,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 25
+ },
+ "hiddenSeries": false,
+ "id": 54694,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": null,
+ "sortDesc": null,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "system_load1",
+ "policy": "default",
+ "query": "SELECT mean(load1) as load1,mean(load5) as load5,mean(load15) as load15 FROM \"system\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), * ORDER BY asc",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU Load",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 25
+ },
+ "hiddenSeries": false,
+ "id": 65089,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/cpu/"
+ },
+ {
+ "alias": "/avg/"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_cpu",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "hide": false,
+ "measurement": "cpu_percentageBusy",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT 100 - mean(\"usage_idle\") FROM \"cpu\" WHERE (\"cpu\" =~ /cpu[0-9].*/ AND \"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval), \"cpu\" fill(null)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU usage per core",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 32
+ },
+ "hiddenSeries": false,
+ "id": 28239,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "hide": false,
+ "measurement": "cpu_percentageBusy",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(running) as running, mean(blocked) as blocked, mean(sleeping) as sleeping, mean(stopped) as stopped, mean(zombies) as zombies, mean(paging) as paging, mean(unknown) as unknown FROM \"processes\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY asc",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Processes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 32
+ },
+ "hiddenSeries": false,
+ "id": 65097,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "hide": false,
+ "measurement": "cpu_percentageBusy",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(context_switches),1s)as \"context switches\" FROM \"kernel\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY asc",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Context Switches",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ops",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 40
+ },
+ "id": 65096,
+ "panels": [],
+ "title": "Disk",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 41
+ },
+ "hiddenSeries": false,
+ "id": 52240,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "disk.total",
+ "color": "#BF1B00",
+ "fill": 0,
+ "linewidth": 2,
+ "zindex": 3
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_path : $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "disk_total",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(total) AS \"total\", mean(used) as \"used\", mean(free) as \"free\" FROM \"disk\" WHERE \"host\" =~ /$server$/ AND \"path\" = '/' AND $timeFilter GROUP BY time($interval), \"host\", \"path\"",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Root Disk usage (/)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 41
+ },
+ "hiddenSeries": false,
+ "id": 65090,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "disk.used_percent",
+ "color": "#BF1B00",
+ "fill": 0,
+ "linewidth": 2,
+ "zindex": 3
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_path ($tag_fstype on $tag_device)",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "disk_total",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"used_percent\") AS \"used_percent\" FROM \"disk\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval), \"path\", \"device\", \"fstype\" fill(null)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "All partitions usage (%)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "percent",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 49
+ },
+ "hiddenSeries": false,
+ "id": 33458,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/used/",
+ "color": "#BF1B00",
+ "zindex": 3
+ },
+ {
+ "alias": "/free/",
+ "bars": false,
+ "fill": 0,
+ "lines": true,
+ "linewidth": 1
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_path : $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "disk_inodes_free",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(inodes_total) as \"total\", mean(inodes_free) as \"free\", mean(inodes_used) as \"used\" FROM \"disk\" WHERE \"host\" =~ /$server$/ AND \"path\" = '/' AND $timeFilter GROUP BY time($interval), \"host\", \"path\"",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Root (/) Disk inodes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 10,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 49
+ },
+ "hiddenSeries": false,
+ "id": 65091,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "disk.used_percent",
+ "color": "#BF1B00",
+ "fill": 0,
+ "linewidth": 2,
+ "zindex": 3
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_path ($tag_fstype on $tag_device)",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "disk_total",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"inodes_free\") AS \"free\" FROM \"disk\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval), \"path\", \"device\", \"fstype\" fill(null)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "All partitions Inodes (Free)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 57
+ },
+ "id": 61850,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/total/",
+ "fill": 0
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "swap_in",
+ "policy": "default",
+ "query": "SELECT mean(free) as \"free\", mean(used) as \"used\", mean(total) as \"total\" FROM \"swap\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY time ASC",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Swap usage (bytes)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 57
+ },
+ "id": 26024,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/in/",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "swap_in",
+ "policy": "default",
+ "query": "SELECT mean(\"in\") as \"in\", mean(\"out\") as \"out\" FROM \"swap\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY time ASC",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Swap I/O bytes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 65
+ },
+ "id": 13782,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(reads),1s) as \"read\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(writes),1s) as \"write\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk I/O requests",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "iops",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 65
+ },
+ "id": 56720,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/read/",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(read_time),1s) as \"read\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(write_time),1s) as \"write\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk I/O time",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 73
+ },
+ "id": 60200,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/read/",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(read_bytes),1s) as \"read\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(write_bytes),1s) as \"write\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk I/O bytes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 81
+ },
+ "id": 65059,
+ "panels": [],
+ "title": "Network",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 82
+ },
+ "id": 42026,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/ in$/",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(bytes_recv),1s)*8 as \"in\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), * fill(none)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(bytes_sent),1s)*8 as \"out\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), * fill(none)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network Usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bps",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 90
+ },
+ "id": 28572,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/ in$/",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(packets_recv), 1s) as \"in\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), * fill(none)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(packets_sent), 1s) as \"out\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), * fill(none)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network Packets",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "pps",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 98
+ },
+ "id": 65093,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(tcp_close) as CLOSED, mean(tcp_close_wait) as CLOSE_WAIT, mean(tcp_closing) as CLOSING, mean(tcp_established) as ESTABLISHED, mean(tcp_fin_wait1) as FIN_WAIT1, mean(tcp_fin_wait2) as FIN_WAIT2, mean(tcp_last_ack) as LAST_ACK, mean(tcp_syn_recv) as SYN_RECV, mean(tcp_syn_sent) as SYN_SENT, mean(tcp_time_wait) as TIME_WAIT FROM \"netstat\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY time ASC",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "TCP connections",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 105
+ },
+ "id": 65094,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(tcp_outrsts), 1s) FROM \"net\" WHERE \"host\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(tcp_activeopens), 1s) FROM \"net\" WHERE \"host\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(tcp_estabresets), 1s) FROM \"net\" WHERE \"host\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(tcp_passiveopens), 1s) FROM \"net\" WHERE \"host\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "D",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "TCP handshake issues",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 112
+ },
+ "id": 58901,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(drop_in), 1s) as \"in\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), host,interface fill(none)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(drop_out), 1s) as \"out\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), host,interface fill(none)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Packets Drop",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Packets drop",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 112
+ },
+ "id": 50643,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": false,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(err_in), 1s) as \"in\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), host,interface fill(none)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(err_out), 1s) as \"out\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), host,interface fill(none)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Packets Error",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+          "label": "Packets error",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "refresh": "5s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [
+ "influxdb",
+ "telegraf"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allFormat": "glob",
+ "current": {
+ "selected": false,
+ "text": "InfluxDB",
+ "value": "InfluxDB"
+ },
+ "datasource": "InfluxDB-Telegraf",
+ "error": null,
+ "hide": 0,
+ "includeAll": false,
+ "label": "",
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "influxdb",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": true,
+ "tags": [],
+ "text": [
+ "ip-172-31-33-65"
+ ],
+ "value": [
+ "ip-172-31-33-65"
+ ]
+ },
+ "datasource": "InfluxDB-Telegraf",
+ "definition": "",
+ "error": null,
+ "hide": 0,
+ "includeAll": false,
+ "label": "Server",
+ "multi": true,
+ "name": "server",
+ "options": [],
+ "query": "SHOW TAG VALUES FROM system WITH KEY=host",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "auto": true,
+ "auto_count": 100,
+ "auto_min": "30s",
+ "current": {
+ "selected": false,
+ "text": "auto",
+ "value": "$__auto_interval_inter"
+ },
+ "datasource": null,
+ "error": null,
+ "hide": 0,
+ "includeAll": false,
+ "label": "Interval",
+ "multi": false,
+ "name": "inter",
+ "options": [
+ {
+ "selected": true,
+ "text": "auto",
+ "value": "$__auto_interval_inter"
+ },
+ {
+ "selected": false,
+ "text": "1s",
+ "value": "1s"
+ },
+ {
+ "selected": false,
+ "text": "5s",
+ "value": "5s"
+ },
+ {
+ "selected": false,
+ "text": "10s",
+ "value": "10s"
+ },
+ {
+ "selected": false,
+ "text": "15s",
+ "value": "15s"
+ },
+ {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ },
+ {
+ "selected": false,
+ "text": "60d",
+ "value": "60d"
+ },
+ {
+ "selected": false,
+ "text": "90d",
+ "value": "90d"
+ }
+ ],
+ "query": "1s,5s,10s,15s,30s,1m,10m,30m,1h,6h,12h,1d,7d,14d,30d,60d,90d",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "$datasource",
+ "definition": "",
+ "error": null,
+ "hide": 0,
+ "includeAll": true,
+ "label": "CPU",
+ "multi": true,
+ "name": "cpu",
+ "options": [],
+ "query": "SHOW TAG VALUES FROM \"cpu\" WITH KEY = \"cpu\" WHERE host =~ /$server/",
+ "refresh": 1,
+ "regex": "^cpu[0-9].*",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "$datasource",
+ "definition": "",
+ "error": null,
+ "hide": 0,
+ "includeAll": true,
+ "label": "disk",
+ "multi": true,
+ "name": "disk",
+ "options": [],
+ "query": "SHOW TAG VALUES FROM \"disk\" WITH KEY = \"device\"",
+ "refresh": 1,
+ "regex": "/[a-z]d[\\D]$/",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": true
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "$datasource",
+ "definition": "",
+ "error": null,
+ "hide": 0,
+ "includeAll": true,
+ "label": "interface",
+ "multi": true,
+ "name": "interface",
+ "options": [],
+ "query": "SHOW TAG VALUES FROM \"net\" WITH KEY = \"interface\" WHERE host =~ /$server/",
+ "refresh": 1,
+ "regex": "^(?!.*veth|all|tap).*",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": true
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "Telegraf - system metrics",
+ "uid": "zBFM0ohGz",
+ "version": 1
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/grafana-datasource.yml b/packer/jambonz-mini/gcp/files/grafana-datasource.yml
new file mode 100644
index 0000000..e3d21b4
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/grafana-datasource.yml
@@ -0,0 +1,34 @@
+# config file version
+apiVersion: 1
+
+# list of datasources that should be deleted from the database
+deleteDatasources:
+ - name: InfluxDB
+ orgId: 1
+ - name: InfluxDB-Telegraf
+ orgId: 1
+
+# list of datasources to insert/update depending
+# whats available in the database
+datasources:
+- name: InfluxDB
+ type: influxdb
+ access: proxy
+ database: homer
+ user: grafana
+ url: http://127.0.0.1:8086
+ jsonData:
+ timeInterval: "15s"
+ # allow users to edit datasources from the UI.
+ editable: true
+
+- name: InfluxDB-Telegraf
+ type: influxdb
+ access: proxy
+ database: telegraf
+ user: grafana
+ url: http://127.0.0.1:8086
+ jsonData:
+ timeInterval: "15s"
+ # allow users to edit datasources from the UI.
+ editable: true
diff --git a/packer/jambonz-mini/gcp/files/initialize-webapp-userdata.sh b/packer/jambonz-mini/gcp/files/initialize-webapp-userdata.sh
new file mode 100644
index 0000000..0dc482d
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/initialize-webapp-userdata.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+PRIVATE_IPV4="$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip)"
+PUBLIC_IPV4="$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)"
+echo "REACT_APP_API_BASE_URL=http://${PUBLIC_IPV4}/api/v1" > /home/admin/apps/jambonz-webapp/.env
+cd /home/admin/apps/jambonz-webapp && sudo npm install --unsafe-perm && npm run build
+
+# update ecosystem.config.js with private ip
+sudo sed -i -e "s/\(.*\)PRIVATE_IP\(.*\)/\1${PRIVATE_IPV4}\2/g" /home/admin/apps/ecosystem.config.js
+sudo -u admin bash -c "pm2 restart /home/admin/apps/ecosystem.config.js"
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/jaeger.service b/packer/jambonz-mini/gcp/files/jaeger.service
new file mode 100644
index 0000000..cd45cfc
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/jaeger.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=jaeger service unit file.
+After=syslog.target network.target local-fs.target
+
+[Service]
+Type=exec
+ExecStart=/usr/local/bin/jaeger-all-in-one
+
+[Install]
+WantedBy=multi-user.target
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/jambones-sql.sql b/packer/jambonz-mini/gcp/files/jambones-sql.sql
new file mode 100644
index 0000000..3f7a2f7
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/jambones-sql.sql
@@ -0,0 +1,271 @@
+/* SQLEditor (MySQL (2))*/
+
+SET FOREIGN_KEY_CHECKS=0;
+
+DROP TABLE IF EXISTS call_routes;
+
+DROP TABLE IF EXISTS lcr_carrier_set_entry;
+
+DROP TABLE IF EXISTS lcr_routes;
+
+DROP TABLE IF EXISTS api_keys;
+
+DROP TABLE IF EXISTS ms_teams_tenants;
+
+DROP TABLE IF EXISTS sbc_addresses;
+
+DROP TABLE IF EXISTS users;
+
+DROP TABLE IF EXISTS phone_numbers;
+
+DROP TABLE IF EXISTS sip_gateways;
+
+DROP TABLE IF EXISTS voip_carriers;
+
+DROP TABLE IF EXISTS accounts;
+
+DROP TABLE IF EXISTS applications;
+
+DROP TABLE IF EXISTS service_providers;
+
+DROP TABLE IF EXISTS webhooks;
+
+CREATE TABLE call_routes
+(
+call_route_sid CHAR(36) NOT NULL UNIQUE ,
+priority INTEGER NOT NULL,
+account_sid CHAR(36) NOT NULL,
+regex VARCHAR(255) NOT NULL,
+application_sid CHAR(36) NOT NULL,
+PRIMARY KEY (call_route_sid)
+) COMMENT='a regex-based pattern match for call routing';
+
+CREATE TABLE lcr_routes
+(
+lcr_route_sid CHAR(36),
+regex VARCHAR(32) NOT NULL COMMENT 'regex-based pattern match against dialed number, used for LCR routing of PSTN calls',
+description VARCHAR(1024),
+priority INTEGER NOT NULL UNIQUE COMMENT 'lower priority routes are attempted first',
+PRIMARY KEY (lcr_route_sid)
+) COMMENT='Least cost routing table';
+
+CREATE TABLE api_keys
+(
+api_key_sid CHAR(36) NOT NULL UNIQUE ,
+token CHAR(36) NOT NULL UNIQUE ,
+account_sid CHAR(36),
+service_provider_sid CHAR(36),
+expires_at TIMESTAMP NULL DEFAULT NULL,
+last_used TIMESTAMP NULL DEFAULT NULL,
+created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+PRIMARY KEY (api_key_sid)
+) COMMENT='An authorization token that is used to access the REST api';
+
+CREATE TABLE ms_teams_tenants
+(
+ms_teams_tenant_sid CHAR(36) NOT NULL UNIQUE ,
+service_provider_sid CHAR(36) NOT NULL,
+account_sid CHAR(36) NOT NULL,
+application_sid CHAR(36),
+tenant_fqdn VARCHAR(255) NOT NULL UNIQUE ,
+PRIMARY KEY (ms_teams_tenant_sid)
+) COMMENT='A Microsoft Teams customer tenant';
+
+CREATE TABLE sbc_addresses
+(
+sbc_address_sid CHAR(36) NOT NULL UNIQUE ,
+ipv4 VARCHAR(255) NOT NULL,
+port INTEGER NOT NULL DEFAULT 5060,
+service_provider_sid CHAR(36),
+PRIMARY KEY (sbc_address_sid)
+);
+
+CREATE TABLE users
+(
+user_sid CHAR(36) NOT NULL UNIQUE ,
+name CHAR(36) NOT NULL UNIQUE ,
+hashed_password VARCHAR(1024) NOT NULL,
+salt CHAR(16) NOT NULL,
+force_change BOOLEAN NOT NULL DEFAULT TRUE,
+PRIMARY KEY (user_sid)
+);
+
+CREATE TABLE voip_carriers
+(
+voip_carrier_sid CHAR(36) NOT NULL UNIQUE ,
+name VARCHAR(64) NOT NULL UNIQUE ,
+description VARCHAR(255),
+account_sid CHAR(36) COMMENT 'if provided, indicates this entity represents a customer PBX that is associated with a specific account',
+application_sid CHAR(36) COMMENT 'If provided, all incoming calls from this source will be routed to the associated application',
+e164_leading_plus BOOLEAN NOT NULL DEFAULT false,
+requires_register BOOLEAN NOT NULL DEFAULT false,
+register_username VARCHAR(64),
+register_sip_realm VARCHAR(64),
+register_password VARCHAR(64),
+tech_prefix VARCHAR(16),
+diversion VARCHAR(32),
+is_active BOOLEAN NOT NULL DEFAULT true,
+PRIMARY KEY (voip_carrier_sid)
+) COMMENT='A Carrier or customer PBX that can send or receive calls';
+
+CREATE TABLE phone_numbers
+(
+phone_number_sid CHAR(36) UNIQUE ,
+number VARCHAR(32) NOT NULL UNIQUE ,
+voip_carrier_sid CHAR(36) NOT NULL,
+account_sid CHAR(36),
+application_sid CHAR(36),
+PRIMARY KEY (phone_number_sid)
+) ENGINE=InnoDB COMMENT='A phone number that has been assigned to an account';
+
+CREATE TABLE webhooks
+(
+webhook_sid CHAR(36) NOT NULL UNIQUE ,
+url VARCHAR(1024) NOT NULL,
+method ENUM("GET","POST") NOT NULL DEFAULT 'POST',
+username VARCHAR(255),
+password VARCHAR(255),
+PRIMARY KEY (webhook_sid)
+) COMMENT='An HTTP callback';
+
+CREATE TABLE sip_gateways
+(
+sip_gateway_sid CHAR(36),
+ipv4 VARCHAR(128) NOT NULL COMMENT 'ip address or DNS name of the gateway. For gateways providing inbound calling service, ip address is required.',
+port INTEGER NOT NULL DEFAULT 5060 COMMENT 'sip signaling port',
+inbound BOOLEAN NOT NULL COMMENT 'if true, whitelist this IP to allow inbound calls from the gateway',
+outbound BOOLEAN NOT NULL COMMENT 'if true, include in least-cost routing when placing calls to the PSTN',
+voip_carrier_sid CHAR(36) NOT NULL,
+is_active BOOLEAN NOT NULL DEFAULT 1,
+PRIMARY KEY (sip_gateway_sid)
+) COMMENT='A whitelisted sip gateway used for origination/termination';
+
+CREATE TABLE lcr_carrier_set_entry
+(
+lcr_carrier_set_entry_sid CHAR(36),
+workload INTEGER NOT NULL DEFAULT 1 COMMENT 'represents a proportion of traffic to send through the associated carrier; can be used for load balancing traffic across carriers with a common priority for a destination',
+lcr_route_sid CHAR(36) NOT NULL,
+voip_carrier_sid CHAR(36) NOT NULL,
+priority INTEGER NOT NULL DEFAULT 0 COMMENT 'lower priority carriers are attempted first',
+PRIMARY KEY (lcr_carrier_set_entry_sid)
+) COMMENT='An entry in the LCR routing list';
+
+CREATE TABLE applications
+(
+application_sid CHAR(36) NOT NULL UNIQUE ,
+name VARCHAR(64) NOT NULL,
+account_sid CHAR(36) NOT NULL COMMENT 'account that this application belongs to',
+call_hook_sid CHAR(36) COMMENT 'webhook to call for inbound calls ',
+call_status_hook_sid CHAR(36) COMMENT 'webhook to call for call status events',
+messaging_hook_sid CHAR(36) COMMENT 'webhook to call for inbound SMS/MMS ',
+speech_synthesis_vendor VARCHAR(64) NOT NULL DEFAULT 'google',
+speech_synthesis_language VARCHAR(12) NOT NULL DEFAULT 'en-US',
+speech_synthesis_voice VARCHAR(64),
+speech_recognizer_vendor VARCHAR(64) NOT NULL DEFAULT 'google',
+speech_recognizer_language VARCHAR(64) NOT NULL DEFAULT 'en-US',
+PRIMARY KEY (application_sid)
+) COMMENT='A defined set of behaviors to be applied to phone calls ';
+
+CREATE TABLE service_providers
+(
+service_provider_sid CHAR(36) NOT NULL UNIQUE ,
+name VARCHAR(64) NOT NULL UNIQUE ,
+description VARCHAR(255),
+root_domain VARCHAR(128) UNIQUE ,
+registration_hook_sid CHAR(36),
+ms_teams_fqdn VARCHAR(255),
+PRIMARY KEY (service_provider_sid)
+) COMMENT='A partition of the platform used by one service provider';
+
+CREATE TABLE accounts
+(
+account_sid CHAR(36) NOT NULL UNIQUE ,
+name VARCHAR(64) NOT NULL,
+sip_realm VARCHAR(132) UNIQUE COMMENT 'sip domain that will be used for devices registering under this account',
+service_provider_sid CHAR(36) NOT NULL COMMENT 'service provider that owns the customer relationship with this account',
+registration_hook_sid CHAR(36) COMMENT 'webhook to call when devices under this account attempt to register',
+device_calling_application_sid CHAR(36) COMMENT 'application to use for outbound calling from an account',
+is_active BOOLEAN NOT NULL DEFAULT true,
+webhook_secret VARCHAR(36),
+disable_cdrs BOOLEAN NOT NULL DEFAULT 0,
+PRIMARY KEY (account_sid)
+) COMMENT='An enterprise that uses the platform for comm services';
+
+CREATE INDEX call_route_sid_idx ON call_routes (call_route_sid);
+ALTER TABLE call_routes ADD FOREIGN KEY account_sid_idxfk (account_sid) REFERENCES accounts (account_sid);
+
+ALTER TABLE call_routes ADD FOREIGN KEY application_sid_idxfk (application_sid) REFERENCES applications (application_sid);
+
+CREATE INDEX api_key_sid_idx ON api_keys (api_key_sid);
+CREATE INDEX account_sid_idx ON api_keys (account_sid);
+ALTER TABLE api_keys ADD FOREIGN KEY account_sid_idxfk_1 (account_sid) REFERENCES accounts (account_sid);
+
+CREATE INDEX service_provider_sid_idx ON api_keys (service_provider_sid);
+ALTER TABLE api_keys ADD FOREIGN KEY service_provider_sid_idxfk (service_provider_sid) REFERENCES service_providers (service_provider_sid);
+
+CREATE INDEX ms_teams_tenant_sid_idx ON ms_teams_tenants (ms_teams_tenant_sid);
+ALTER TABLE ms_teams_tenants ADD FOREIGN KEY service_provider_sid_idxfk_1 (service_provider_sid) REFERENCES service_providers (service_provider_sid);
+
+ALTER TABLE ms_teams_tenants ADD FOREIGN KEY account_sid_idxfk_2 (account_sid) REFERENCES accounts (account_sid);
+
+ALTER TABLE ms_teams_tenants ADD FOREIGN KEY application_sid_idxfk_1 (application_sid) REFERENCES applications (application_sid);
+
+CREATE INDEX tenant_fqdn_idx ON ms_teams_tenants (tenant_fqdn);
+CREATE INDEX sbc_addresses_idx_host_port ON sbc_addresses (ipv4,port);
+
+CREATE INDEX sbc_address_sid_idx ON sbc_addresses (sbc_address_sid);
+CREATE INDEX service_provider_sid_idx ON sbc_addresses (service_provider_sid);
+ALTER TABLE sbc_addresses ADD FOREIGN KEY service_provider_sid_idxfk_2 (service_provider_sid) REFERENCES service_providers (service_provider_sid);
+
+CREATE INDEX user_sid_idx ON users (user_sid);
+CREATE INDEX name_idx ON users (name);
+CREATE INDEX voip_carrier_sid_idx ON voip_carriers (voip_carrier_sid);
+CREATE INDEX name_idx ON voip_carriers (name);
+ALTER TABLE voip_carriers ADD FOREIGN KEY account_sid_idxfk_3 (account_sid) REFERENCES accounts (account_sid);
+
+ALTER TABLE voip_carriers ADD FOREIGN KEY application_sid_idxfk_2 (application_sid) REFERENCES applications (application_sid);
+
+CREATE INDEX phone_number_sid_idx ON phone_numbers (phone_number_sid);
+CREATE INDEX voip_carrier_sid_idx ON phone_numbers (voip_carrier_sid);
+ALTER TABLE phone_numbers ADD FOREIGN KEY voip_carrier_sid_idxfk (voip_carrier_sid) REFERENCES voip_carriers (voip_carrier_sid);
+
+ALTER TABLE phone_numbers ADD FOREIGN KEY account_sid_idxfk_4 (account_sid) REFERENCES accounts (account_sid);
+
+ALTER TABLE phone_numbers ADD FOREIGN KEY application_sid_idxfk_3 (application_sid) REFERENCES applications (application_sid);
+
+CREATE INDEX webhook_sid_idx ON webhooks (webhook_sid);
+CREATE UNIQUE INDEX sip_gateway_idx_hostport ON sip_gateways (ipv4,port);
+
+ALTER TABLE sip_gateways ADD FOREIGN KEY voip_carrier_sid_idxfk_1 (voip_carrier_sid) REFERENCES voip_carriers (voip_carrier_sid);
+
+ALTER TABLE lcr_carrier_set_entry ADD FOREIGN KEY lcr_route_sid_idxfk (lcr_route_sid) REFERENCES lcr_routes (lcr_route_sid);
+
+ALTER TABLE lcr_carrier_set_entry ADD FOREIGN KEY voip_carrier_sid_idxfk_2 (voip_carrier_sid) REFERENCES voip_carriers (voip_carrier_sid);
+
+CREATE UNIQUE INDEX applications_idx_name ON applications (account_sid,name);
+
+CREATE INDEX application_sid_idx ON applications (application_sid);
+CREATE INDEX account_sid_idx ON applications (account_sid);
+ALTER TABLE applications ADD FOREIGN KEY account_sid_idxfk_5 (account_sid) REFERENCES accounts (account_sid);
+
+ALTER TABLE applications ADD FOREIGN KEY call_hook_sid_idxfk (call_hook_sid) REFERENCES webhooks (webhook_sid);
+
+ALTER TABLE applications ADD FOREIGN KEY call_status_hook_sid_idxfk (call_status_hook_sid) REFERENCES webhooks (webhook_sid);
+
+ALTER TABLE applications ADD FOREIGN KEY messaging_hook_sid_idxfk (messaging_hook_sid) REFERENCES webhooks (webhook_sid);
+
+CREATE INDEX service_provider_sid_idx ON service_providers (service_provider_sid);
+CREATE INDEX name_idx ON service_providers (name);
+CREATE INDEX root_domain_idx ON service_providers (root_domain);
+ALTER TABLE service_providers ADD FOREIGN KEY registration_hook_sid_idxfk (registration_hook_sid) REFERENCES webhooks (webhook_sid);
+
+CREATE INDEX account_sid_idx ON accounts (account_sid);
+CREATE INDEX sip_realm_idx ON accounts (sip_realm);
+CREATE INDEX service_provider_sid_idx ON accounts (service_provider_sid);
+ALTER TABLE accounts ADD FOREIGN KEY service_provider_sid_idxfk_3 (service_provider_sid) REFERENCES service_providers (service_provider_sid);
+
+ALTER TABLE accounts ADD FOREIGN KEY registration_hook_sid_idxfk_1 (registration_hook_sid) REFERENCES webhooks (webhook_sid);
+
+ALTER TABLE accounts ADD FOREIGN KEY device_calling_application_sid_idxfk (device_calling_application_sid) REFERENCES applications (application_sid);
+
+SET FOREIGN_KEY_CHECKS=1;
diff --git a/packer/jambonz-mini/gcp/files/mod_avmd.c.patch b/packer/jambonz-mini/gcp/files/mod_avmd.c.patch
new file mode 100644
index 0000000..ae995b9
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/mod_avmd.c.patch
@@ -0,0 +1,25 @@
+--- mod_avmd.c 2022-02-10 11:19:05
++++ mod_avmd.c.new 2023-04-19 13:28:03
+@@ -1476,15 +1476,20 @@
+ }
+ if ((SWITCH_CALL_DIRECTION_OUTBOUND == switch_channel_direction(channel)) && (avmd_session->settings.outbound_channnel == 1)) {
+ flags |= SMBF_READ_REPLACE;
+- direction = "READ_REPLACE";
++ direction = "READ_REPLACE";
+ }
+- if ((SWITCH_CALL_DIRECTION_INBOUND == switch_channel_direction(channel)) && (avmd_session->settings.inbound_channnel == 1)) {
++ if ((SWITCH_CALL_DIRECTION_INBOUND == switch_channel_direction(channel)) /* && (avmd_session->settings.inbound_channnel == 1) */) {
++ /* DCH: for drachtio-fsmrf */
++ flags |= SMBF_READ_REPLACE;
++ direction = "READ_REPLACE";
++/*
+ flags |= SMBF_WRITE_REPLACE;
+ if (!strcmp(direction, "READ_REPLACE")) {
+ direction = "READ_REPLACE | WRITE_REPLACE";
+ } else {
+ direction = "WRITE_REPLACE";
+ }
++*/
+ }
+
+ if (flags == 0) {
diff --git a/packer/jambonz-mini/gcp/files/mod_httapi.c.patch b/packer/jambonz-mini/gcp/files/mod_httapi.c.patch
new file mode 100644
index 0000000..eab370d
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/mod_httapi.c.patch
@@ -0,0 +1,55 @@
+--- mod_httapi.c 2023-03-01 13:57:28
++++ mod_httapi.c.new 2023-03-01 14:19:34
+@@ -2472,6 +2472,12 @@
+ char *ua = NULL;
+ const char *profile_name = NULL;
+ int tries = 10;
++ int awsSignedUrl = strstr(url, "X-Amz-Signature") != NULL &&
++ strstr(url, "X-Amz-Algorithm") != NULL &&
++ strstr(url, "X-Amz-Credential") != NULL &&
++ strstr(url, "X-Amz-Date") != NULL &&
++ strstr(url, "X-Amz-Expires") != NULL &&
++ strstr(url, "X-Amz-SignedHeaders") != NULL;
+
+ if (context->url_params) {
+ profile_name = switch_event_get_header(context->url_params, "profile_name");
+@@ -2614,7 +2620,7 @@
+ switch_curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *) client);
+ } else {
+ switch_curl_easy_setopt(curl_handle, CURLOPT_HEADER, 1);
+- switch_curl_easy_setopt(curl_handle, CURLOPT_NOBODY, 1);
++ if (!awsSignedUrl) switch_curl_easy_setopt(curl_handle, CURLOPT_NOBODY, 1);
+ }
+
+ if (headers) {
+@@ -2783,6 +2789,12 @@
+ char *metadata;
+ const char *ext = NULL;
+ const char *err_msg = NULL;
++ int awsSignedUrl = strstr(url, "X-Amz-Signature") != NULL &&
++ strstr(url, "X-Amz-Algorithm") != NULL &&
++ strstr(url, "X-Amz-Credential") != NULL &&
++ strstr(url, "X-Amz-Date") != NULL &&
++ strstr(url, "X-Amz-Expires") != NULL &&
++ strstr(url, "X-Amz-SignedHeaders") != NULL;
+
+ load_cache_data(context, url);
+
+@@ -2831,7 +2843,7 @@
+
+ if (!unreachable && !zstr(context->metadata)) {
+ metadata = switch_core_sprintf(context->pool, "%s:%s:%s:%s:%s",
+- url,
++ awsSignedUrl ? context->cache_file : url,
+ switch_event_get_header_nil(headers, "last-modified"),
+ switch_event_get_header_nil(headers, "etag"),
+ switch_event_get_header_nil(headers, "content-length"),
+@@ -2855,7 +2867,7 @@
+
+
+ metadata = switch_core_sprintf(context->pool, "%s:%s:%s:%s:%s",
+- url,
++ awsSignedUrl ? context->cache_file : url,
+ switch_event_get_header_nil(headers, "last-modified"),
+ switch_event_get_header_nil(headers, "etag"),
+ switch_event_get_header_nil(headers, "content-length"),
diff --git a/packer/jambonz-mini/gcp/files/mod_opusfile.c.patch b/packer/jambonz-mini/gcp/files/mod_opusfile.c.patch
new file mode 100644
index 0000000..08d43cb
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/mod_opusfile.c.patch
@@ -0,0 +1,13 @@
+--- mod_opusfile.c 2019-09-25 08:55:37.000000000 -0400
++++ mod_opusfile.c.new 2020-01-02 10:24:57.000000000 -0500
+@@ -282,7 +282,9 @@
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "[OGG/OPUS File] Channels: %i\n", head->channel_count);
+ if (head->input_sample_rate) {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "[OGG/OPUS File] Original sampling rate: %lu Hz\n", (unsigned long)head->input_sample_rate);
+- handle->samplerate = context->samplerate = head->input_sample_rate;
++ // DH: per https://github.com/xiph/opusfile/blob/d2577d7fdfda04bc32a853e80e62d6faa2a20859/include/opusfile.h#L56
++ // the API always decodes to 48kHz, and we should not be telling freeswitch to play out the originally recorded sample rate
++ // handle->samplerate = context->samplerate = head->input_sample_rate;
+ }
+ }
+ if (op_seekable(context->of)) {
diff --git a/packer/jambonz-mini/gcp/files/modules.conf.in.extra b/packer/jambonz-mini/gcp/files/modules.conf.in.extra
new file mode 100644
index 0000000..081860d
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/modules.conf.in.extra
@@ -0,0 +1,187 @@
+applications/mod_audio_fork
+applications/mod_aws_lex
+applications/mod_aws_transcribe
+applications/mod_azure_transcribe
+applications/mod_deepgram_transcribe
+applications/mod_google_tts
+applications/mod_google_transcribe
+applications/mod_ibm_transcribe
+applications/mod_jambonz_transcribe
+applications/mod_nuance_transcribe
+applications/mod_nvidia_transcribe
+applications/mod_soniox_transcribe
+applications/mod_dialogflow
+#applications/mod_abstraction
+#applications/mod_av
+applications/mod_avmd
+#applications/mod_bert
+#applications/mod_blacklist
+#applications/mod_callcenter
+#applications/mod_cidlookup
+#applications/mod_cluechoo
+applications/mod_commands
+applications/mod_conference
+#applications/mod_curl
+#applications/mod_cv
+#applications/mod_db
+#applications/mod_directory
+#applications/mod_distributor
+applications/mod_dptools
+#applications/mod_easyroute
+#applications/mod_enum
+#applications/mod_esf
+#applications/mod_esl
+#applications/mod_expr
+#applications/mod_fifo
+#applications/mod_fsk
+#applications/mod_fsv
+#applications/mod_hash
+#applications/mod_hiredis
+applications/mod_httapi
+#applications/mod_http_cache
+#applications/mod_ladspa
+#applications/mod_lcr
+#applications/mod_memcache
+#applications/mod_mongo
+#applications/mod_mp4
+#applications/mod_mp4v2
+#applications/mod_nibblebill
+#applications/mod_oreka
+#applications/mod_osp
+#applications/mod_prefix
+#applications/mod_rad_auth
+#applications/mod_redis
+#applications/mod_rss
+#applications/mod_signalwire
+#applications/mod_sms
+#applications/mod_sms_flowroute
+#applications/mod_snapshot
+#applications/mod_snom
+#applications/mod_sonar
+#applications/mod_soundtouch
+applications/mod_spandsp
+#applications/mod_spy
+#applications/mod_stress
+#applications/mod_translate
+#applications/mod_valet_parking
+#applications/mod_video_filter
+#applications/mod_vmd
+#applications/mod_voicemail
+#applications/mod_voicemail_ivr
+#asr_tts/mod_cepstral
+#asr_tts/mod_flite
+#asr_tts/mod_pocketsphinx
+#asr_tts/mod_tts_commandline
+#asr_tts/mod_unimrcp
+codecs/mod_amr
+#codecs/mod_amrwb
+#codecs/mod_b64
+#codecs/mod_bv
+#codecs/mod_clearmode
+#codecs/mod_codec2
+#codecs/mod_com_g729
+#codecs/mod_dahdi_codec
+codecs/mod_g723_1
+codecs/mod_g729
+codecs/mod_h26x
+#codecs/mod_ilbc
+#codecs/mod_isac
+#codecs/mod_mp4v
+codecs/mod_opus
+#codecs/mod_sangoma_codec
+#codecs/mod_silk
+#codecs/mod_siren
+#codecs/mod_theora
+#databases/mod_mariadb
+#databases/mod_pgsql
+#dialplans/mod_dialplan_asterisk
+#dialplans/mod_dialplan_directory
+dialplans/mod_dialplan_xml
+#directories/mod_ldap
+#endpoints/mod_alsa
+#endpoints/mod_dingaling
+#endpoints/mod_gsmopen
+#endpoints/mod_h323
+#endpoints/mod_khomp
+#endpoints/mod_loopback
+#endpoints/mod_opal
+#endpoints/mod_portaudio
+endpoints/mod_rtc
+#endpoints/mod_rtmp
+#endpoints/mod_skinny
+endpoints/mod_sofia
+#endpoints/mod_verto
+#event_handlers/mod_amqp
+event_handlers/mod_cdr_csv
+#event_handlers/mod_cdr_mongodb
+#event_handlers/mod_cdr_pg_csv
+#event_handlers/mod_cdr_sqlite
+#event_handlers/mod_erlang_event
+#event_handlers/mod_event_multicast
+event_handlers/mod_event_socket
+#event_handlers/mod_fail2ban
+#event_handlers/mod_format_cdr
+#event_handlers/mod_json_cdr
+#event_handlers/mod_radius_cdr
+#event_handlers/mod_odbc_cdr
+#event_handlers/mod_kazoo
+#event_handlers/mod_rayo
+#event_handlers/mod_smpp
+#event_handlers/mod_snmp
+#event_handlers/mod_event_zmq
+#formats/mod_imagick
+formats/mod_local_stream
+formats/mod_native_file
+#formats/mod_png
+#formats/mod_portaudio_stream
+#formats/mod_shell_stream
+formats/mod_shout
+formats/mod_sndfile
+#formats/mod_ssml
+formats/mod_tone_stream
+#formats/mod_vlc
+formats/mod_opusfile
+#languages/mod_basic
+#languages/mod_java
+#languages/mod_lua
+#languages/mod_managed
+#languages/mod_perl
+#languages/mod_python
+#languages/mod_v8
+#languages/mod_yaml
+loggers/mod_console
+#loggers/mod_graylog2
+loggers/mod_logfile
+loggers/mod_syslog
+#loggers/mod_raven
+#say/mod_say_de
+say/mod_say_en
+#say/mod_say_es
+#say/mod_say_es_ar
+#say/mod_say_fa
+#say/mod_say_fr
+#say/mod_say_he
+#say/mod_say_hr
+#say/mod_say_hu
+#say/mod_say_it
+#say/mod_say_ja
+#say/mod_say_nl
+#say/mod_say_pl
+#say/mod_say_pt
+#say/mod_say_ru
+#say/mod_say_sv
+#say/mod_say_th
+#say/mod_say_zh
+#timers/mod_posix_timer
+#timers/mod_timerfd
+xml_int/mod_xml_cdr
+#xml_int/mod_xml_curl
+#xml_int/mod_xml_ldap
+#xml_int/mod_xml_radius
+#xml_int/mod_xml_rpc
+#xml_int/mod_xml_scgi
+
+#../../libs/freetdm/mod_freetdm
+
+## Experimental Modules (don't cry if they're broken)
+#../../contrib/mod/xml_int/mod_xml_odbc
diff --git a/packer/jambonz-mini/gcp/files/modules.conf.in.grpc.patch b/packer/jambonz-mini/gcp/files/modules.conf.in.grpc.patch
new file mode 100644
index 0000000..4d43e27
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/modules.conf.in.grpc.patch
@@ -0,0 +1,10 @@
+--- modules.conf.in 2019-10-23 15:09:23.114079884 +0000
++++ modules.conf.in.new 2019-10-23 15:10:08.330364591 +0000
+@@ -1,4 +1,7 @@
+ applications/mod_audio_fork
++applications/mod_google_tts
++applications/mod_google_transcribe
++applications/mod_dialogflow
+ #applications/mod_abstraction
+ applications/mod_av
+ #applications/mod_avmd
diff --git a/packer/jambonz-mini/gcp/files/modules.conf.in.patch b/packer/jambonz-mini/gcp/files/modules.conf.in.patch
new file mode 100644
index 0000000..a470b5c
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/modules.conf.in.patch
@@ -0,0 +1,139 @@
+--- modules.conf.in 2019-09-25 08:55:34.000000000 -0400
++++ modules.conf.in.new 2020-01-02 10:36:07.000000000 -0500
+@@ -1,5 +1,6 @@
++applications/mod_audio_fork
+ #applications/mod_abstraction
+-applications/mod_av
++#applications/mod_av
+ #applications/mod_avmd
+ #applications/mod_bert
+ #applications/mod_blacklist
+@@ -10,19 +11,19 @@
+ applications/mod_conference
+ #applications/mod_curl
+ #applications/mod_cv
+-applications/mod_db
++#applications/mod_db
+ #applications/mod_directory
+ #applications/mod_distributor
+ applications/mod_dptools
+ #applications/mod_easyroute
+-applications/mod_enum
+-applications/mod_esf
++#applications/mod_enum
++#applications/mod_esf
+ #applications/mod_esl
+-applications/mod_expr
+-applications/mod_fifo
++#applications/mod_expr
++#applications/mod_fifo
+ #applications/mod_fsk
+-applications/mod_fsv
+-applications/mod_hash
++#applications/mod_fsv
++#applications/mod_hash
+ #applications/mod_hiredis
+ applications/mod_httapi
+ #applications/mod_http_cache
+@@ -39,8 +40,8 @@
+ #applications/mod_rad_auth
+ #applications/mod_redis
+ #applications/mod_rss
+-applications/mod_signalwire
+-applications/mod_sms
++#applications/mod_signalwire
++#applications/mod_sms
+ #applications/mod_sms_flowroute
+ #applications/mod_snapshot
+ #applications/mod_snom
+@@ -50,10 +51,10 @@
+ #applications/mod_spy
+ #applications/mod_stress
+ #applications/mod_translate
+-applications/mod_valet_parking
++#applications/mod_valet_parking
+ #applications/mod_video_filter
+ #applications/mod_vmd
+-applications/mod_voicemail
++#applications/mod_voicemail
+ #applications/mod_voicemail_ivr
+ #asr_tts/mod_cepstral
+ #asr_tts/mod_flite
+@@ -62,7 +63,7 @@
+ #asr_tts/mod_unimrcp
+ codecs/mod_amr
+ #codecs/mod_amrwb
+-codecs/mod_b64
++#codecs/mod_b64
+ #codecs/mod_bv
+ #codecs/mod_clearmode
+ #codecs/mod_codec2
+@@ -80,8 +81,8 @@
+ #codecs/mod_siren
+ #codecs/mod_theora
+ #databases/mod_mariadb
+-databases/mod_pgsql
+-dialplans/mod_dialplan_asterisk
++#databases/mod_pgsql
++#dialplans/mod_dialplan_asterisk
+ #dialplans/mod_dialplan_directory
+ dialplans/mod_dialplan_xml
+ #directories/mod_ldap
+@@ -90,19 +91,19 @@
+ #endpoints/mod_gsmopen
+ #endpoints/mod_h323
+ #endpoints/mod_khomp
+-endpoints/mod_loopback
++#endpoints/mod_loopback
+ #endpoints/mod_opal
+ #endpoints/mod_portaudio
+ endpoints/mod_rtc
+ #endpoints/mod_rtmp
+-endpoints/mod_skinny
++#endpoints/mod_skinny
+ endpoints/mod_sofia
+-endpoints/mod_verto
++#endpoints/mod_verto
+ #event_handlers/mod_amqp
+ event_handlers/mod_cdr_csv
+ #event_handlers/mod_cdr_mongodb
+ #event_handlers/mod_cdr_pg_csv
+-event_handlers/mod_cdr_sqlite
++#event_handlers/mod_cdr_sqlite
+ #event_handlers/mod_erlang_event
+ #event_handlers/mod_event_multicast
+ event_handlers/mod_event_socket
+@@ -119,18 +120,18 @@
+ #formats/mod_imagick
+ formats/mod_local_stream
+ formats/mod_native_file
+-formats/mod_png
++#formats/mod_png
+ #formats/mod_portaudio_stream
+ #formats/mod_shell_stream
+-#formats/mod_shout
++formats/mod_shout
+ formats/mod_sndfile
+ #formats/mod_ssml
+ formats/mod_tone_stream
+ #formats/mod_vlc
+-#formats/mod_opusfile
++formats/mod_opusfile
+ #languages/mod_basic
+ #languages/mod_java
+-languages/mod_lua
++#languages/mod_lua
+ #languages/mod_managed
+ #languages/mod_perl
+ #languages/mod_python
+@@ -165,8 +166,8 @@
+ #xml_int/mod_xml_curl
+ #xml_int/mod_xml_ldap
+ #xml_int/mod_xml_radius
+-xml_int/mod_xml_rpc
+-xml_int/mod_xml_scgi
++#xml_int/mod_xml_rpc
++#xml_int/mod_xml_scgi
+
+ #../../libs/freetdm/mod_freetdm
+
diff --git a/packer/jambonz-mini/gcp/files/modules.conf.patch b/packer/jambonz-mini/gcp/files/modules.conf.patch
new file mode 100644
index 0000000..cc29321
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/modules.conf.patch
@@ -0,0 +1,118 @@
+--- modules.conf 2017-04-25 18:44:46.772490196 +0000
++++ modules.conf.new 2017-04-25 18:47:05.967886830 +0000
+@@ -10,19 +10,19 @@
+ applications/mod_conference
+ #applications/mod_curl
+ #applications/mod_cv
+-applications/mod_db
++#applications/mod_db
+ #applications/mod_directory
+ #applications/mod_distributor
+ applications/mod_dptools
+ #applications/mod_easyroute
+-applications/mod_enum
+-applications/mod_esf
++#applications/mod_enum
++#applications/mod_esf
+ #applications/mod_esl
+ applications/mod_expr
+-applications/mod_fifo
++#applications/mod_fifo
+ #applications/mod_fsk
+-applications/mod_fsv
+-applications/mod_hash
++#applications/mod_fsv
++#applications/mod_hash
+ #applications/mod_hiredis
+ applications/mod_httapi
+ #applications/mod_http_cache
+@@ -39,19 +39,19 @@
+ #applications/mod_rad_auth
+ #applications/mod_redis
+ #applications/mod_rss
+-applications/mod_sms
++#applications/mod_sms
+ #applications/mod_sms_flowroute
+ #applications/mod_snapshot
+ #applications/mod_snom
+ #applications/mod_sonar
+ #applications/mod_soundtouch
+-applications/mod_spandsp
++#applications/mod_spandsp
+ #applications/mod_spy
+ #applications/mod_stress
+ #applications/mod_translate
+-applications/mod_valet_parking
++#applications/mod_valet_parking
+ #applications/mod_vmd
+-applications/mod_voicemail
++#applications/mod_voicemail
+ #applications/mod_voicemail_ivr
+ #asr_tts/mod_cepstral
+ #asr_tts/mod_flite
+@@ -67,7 +67,7 @@
+ #codecs/mod_com_g729
+ #codecs/mod_dahdi_codec
+ codecs/mod_g723_1
+-codecs/mod_g729
++#codecs/mod_g729
+ codecs/mod_h26x
+ #codecs/mod_ilbc
+ #codecs/mod_isac
+@@ -77,7 +77,7 @@
+ #codecs/mod_silk
+ #codecs/mod_siren
+ #codecs/mod_theora
+-dialplans/mod_dialplan_asterisk
++#dialplans/mod_dialplan_asterisk
+ #dialplans/mod_dialplan_directory
+ dialplans/mod_dialplan_xml
+ #directories/mod_ldap
+@@ -89,17 +89,17 @@
+ endpoints/mod_loopback
+ #endpoints/mod_opal
+ #endpoints/mod_portaudio
+-endpoints/mod_rtc
++#endpoints/mod_rtc
+ #endpoints/mod_rtmp
+-endpoints/mod_skinny
++#endpoints/mod_skinny
+ #endpoints/mod_skypopen
+ endpoints/mod_sofia
+-endpoints/mod_verto
++#endpoints/mod_verto
+ #event_handlers/mod_amqp
+-event_handlers/mod_cdr_csv
++#event_handlers/mod_cdr_csv
+ #event_handlers/mod_cdr_mongodb
+ #event_handlers/mod_cdr_pg_csv
+-event_handlers/mod_cdr_sqlite
++#event_handlers/mod_cdr_sqlite
+ #event_handlers/mod_erlang_event
+ #event_handlers/mod_event_multicast
+ event_handlers/mod_event_socket
+@@ -125,7 +125,7 @@
+ #formats/mod_vlc
+ #languages/mod_basic
+ #languages/mod_java
+-languages/mod_lua
++#languages/mod_lua
+ #languages/mod_managed
+ #languages/mod_perl
+ #languages/mod_python
+@@ -155,12 +155,12 @@
+ #say/mod_say_zh
+ #timers/mod_posix_timer
+ #timers/mod_timerfd
+-xml_int/mod_xml_cdr
++#xml_int/mod_xml_cdr
+ #xml_int/mod_xml_curl
+ #xml_int/mod_xml_ldap
+ #xml_int/mod_xml_radius
+-xml_int/mod_xml_rpc
+-xml_int/mod_xml_scgi
++#xml_int/mod_xml_rpc
++#xml_int/mod_xml_scgi
+
+ #../../libs/freetdm/mod_freetdm
+
diff --git a/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.extra b/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.extra
new file mode 100644
index 0000000..4774bf7
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.extra
@@ -0,0 +1,160 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.grpc b/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.grpc
new file mode 100644
index 0000000..beee2ad
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.grpc
@@ -0,0 +1,151 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.grpc.patch b/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.grpc.patch
new file mode 100644
index 0000000..3d5cf68
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.grpc.patch
@@ -0,0 +1,12 @@
+--- modules.conf.xml 2019-05-15 21:08:29.049449029 +0000
++++ modules.conf.xml.new 2019-05-15 22:05:00.303623468 +0000
+@@ -7,6 +7,9 @@
+
+
+
++
++
++
+
+
+
diff --git a/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.lws b/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.lws
new file mode 100644
index 0000000..835d72c
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.lws
@@ -0,0 +1,147 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.patch b/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.patch
new file mode 100644
index 0000000..57aca31
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/modules.conf.vanilla.xml.patch
@@ -0,0 +1,105 @@
+--- modules.conf.xml 2019-09-30 19:01:33.304020805 +0000
++++ modules.conf.xml.new 2019-09-30 23:11:23.371830901 +0000
+@@ -1,5 +1,6 @@
+
+
++
+
+
+
+@@ -10,7 +11,7 @@
+
+
+
+-
++
+
+
+
+@@ -39,7 +40,7 @@
+
+
+
+-
++
+
+
+
+@@ -47,28 +48,28 @@
+
+
+
+-
++
+
+
+-
++
+
+
+
+-
++
+
+-
+-
+-
++
++
++
+
+-
++
+
+
+
+
+-
+-
++
++
+
+-
++
+
+
+
+@@ -87,7 +88,7 @@
+
+
+
+-
++
+
+
+
+@@ -96,17 +97,17 @@
+
+
+
+-
++
+
+
+
+
+
+-
++
+
+
+
+-
++
+
+
+
+@@ -123,7 +124,7 @@
+
+
+
+-
++
+
+
+
diff --git a/packer/jambonz-mini/gcp/files/modules.conf.xml b/packer/jambonz-mini/gcp/files/modules.conf.xml
new file mode 100644
index 0000000..b1fc065
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/modules.conf.xml
@@ -0,0 +1,147 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/gcp/files/mrf_dialplan.xml b/packer/jambonz-mini/gcp/files/mrf_dialplan.xml
new file mode 100644
index 0000000..eaf85f2
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/mrf_dialplan.xml
@@ -0,0 +1,16 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/mrf_sip_profile.xml b/packer/jambonz-mini/gcp/files/mrf_sip_profile.xml
new file mode 100644
index 0000000..ba7559c
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/mrf_sip_profile.xml
@@ -0,0 +1,65 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/mysql-server.key b/packer/jambonz-mini/gcp/files/mysql-server.key
new file mode 100644
index 0000000..4686449
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/mysql-server.key
@@ -0,0 +1,432 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQGiBD4+owwRBAC14GIfUfCyEDSIePvEW3SAFUdJBtoQHH/nJKZyQT7h9bPlUWC3
+RODjQReyCITRrdwyrKUGku2FmeVGwn2u2WmDMNABLnpprWPkBdCk96+OmSLN9brZ
+fw2vOUgCmYv2hW0hyDHuvYlQA/BThQoADgj8AW6/0Lo7V1W9/8VuHP0gQwCgvzV3
+BqOxRznNCRCRxAuAuVztHRcEAJooQK1+iSiunZMYD1WufeXfshc57S/+yeJkegNW
+hxwR9pRWVArNYJdDRT+rf2RUe3vpquKNQU/hnEIUHJRQqYHo8gTxvxXNQc7fJYLV
+K2HtkrPbP72vwsEKMYhhr0eKCbtLGfls9krjJ6sBgACyP/Vb7hiPwxh6rDZ7ITnE
+kYpXBACmWpP8NJTkamEnPCia2ZoOHODANwpUkP43I7jsDmgtobZX9qnrAXw+uNDI
+QJEXM6FSbi0LLtZciNlYsafwAPEOMDKpMqAK6IyisNtPvaLd8lH0bPAnWqcyefep
+rv0sxxqUEMcM3o7wwgfN83POkDasDbs3pjwPhxvhz6//62zQJ7Q2TXlTUUwgUmVs
+ZWFzZSBFbmdpbmVlcmluZyA8bXlzcWwtYnVpbGRAb3NzLm9yYWNsZS5jb20+iGwE
+ExECACwCGyMCHgECF4ACGQEGCwkIBwMCBhUKCQgCAwUWAgMBAAUCXEBY+wUJI87e
+5AAKCRCMcY07UHLh9RZPAJ9uvm0zlzfCN+DHxHVaoFLFjdVYTQCfborsC9tmEZYa
+whhogjeBkZkorbyIaQQTEQIAKQIbIwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAhkB
+BQJTAdRmBQkaZsvLAAoJEIxxjTtQcuH1X4MAoKNLWAbCBUj96637kv6Xa/fJuX5m
+AJwPtmgDfjUe2iuhXdTrFEPT19SB6ohmBBMRAgAmAhsjBgsJCAcDAgQVAggDBBYC
+AwECHgECF4AFAk53PioFCRP7AhUACgkQjHGNO1By4fUmzACeJdfqgc9gWTUhgmcM
+AOmG4RjwuxcAoKfM+U8yMOGELi+TRif7MtKEms6piGkEExECACkCGyMGCwkIBwMC
+BBUCCAMEFgIDAQIeAQIXgAIZAQUCUZSROgUJFTchqgAKCRCMcY07UHLh9YtAAJ9X
+rA/ymlmozPZn+A9ls8/uwMcTsQCfaQMNq1dNkhH2kyByc3Rx9/W2xfqJARwEEAEC
+AAYFAlAS6+UACgkQ8aIC+GoXHivrWwf/dtLk/x+NC2VMDlg+vOeM0qgG1IlhXZfi
+NsEisvvGaz4m8fSFRGe+1bvvfDoKRhxiGXU48RusjixzvBb6KTMuY6JpOVfz9Dj3
+H9spYriHa+i6rYySXZIpOhfLiMnTy7NH2OvYCyNzSS/ciIUACIfH/2NH8zNT5CNF
+1uPNRs7HsHzzz7pOlTjtTWiF4cq/Ij6Z6CNrmdj+SiMvjYN9u6sdEKGtoNtpycgD
+5HGKR+I7Nd/7v56yhaUe4FpuvsNXig86K9tI6MUFS8CUyy7Hj3kVBZOUWVBM053k
+nGdALSygQr50DA3jMGKVl4ZnHje2RVWRmFTr5YWoRTMxUSQPMLpBNIkBHAQQAQIA
+BgUCU1B+vQAKCRAohbcD0zcc8dWwCACWXXWDXIcAWRUw+j3ph8dr9u3SItljn3wB
+c7clpclKWPuLvTz7lGgzlVB0s8hH4xgkSA+zLzl6u56mpUzskFl7f1I3Ac9GGpM4
+0M5vmmR9hwlD1HdZtGfbD+wkjlqgitNLoRcGdRf/+U7x09GhSS7Bf339sunIX6sM
+gXSC4L32D3zDjF5icGdb0kj+3lCrRmp853dGyA3ff9yUiBkxcKNawpi7Vz3D2ddU
+pOF3BP+8NKPg4P2+srKgkFbd4HidcISQCt3rY4vaTkEkLKg0nNA6U4r0YgOa7wIT
+SsxFlntMMzaRg53QtK0+YkH0KuZR3GY8B7pi+tlgycyVR7mIFo7riQEcBBABCAAG
+BQJWgVd0AAoJEEZu4b/gk4UKk9MH/Rnt7EccPjSJC5CrB2AU5LY2Dsr+PePI2ubP
+WsEdG82qSjjGpbhIH8LSg/PzQoGHiFWMmmZWJktRT+dcgLbs3b2VwCNAwCE8jOHd
+UkQhEowgomdNvHiBHKHjP4/lF68KOPiO/2mxYYkmpM7BWf3kB57DJ5CTi3/JLoN7
+zF40qIs/p09ePvnwStpglbbtUn7XPO+1/Ee8VHzimABom52PkQIuxNiVUzLVn3bS
+Wqrd5ecuqLk6yzjPXd2XhDHWC9Twpl68GePru6EzQtusi0m6S/sHgEXqh/IxrFZV
+JlljF75JvosZq5zeulr0i6kOij+Y1p6MFffihITZ1gTmk+CLvK2JASIEEAECAAwF
+Ak53QS4FAwASdQAACgkQlxC4m8pXrXwJ8Qf/be/UO9mqfoc2sMyhwMpN4/fdBWwf
+LkA12FXQDOQMvwH9HsmEjnfUgYKXschZRi+DuHXe1P7l8G2aQLubhBsQf9ejKvRF
+TzuWMQkdIq+6Koulxv6ofkCcv3d1xtO2W7nb5yxcpVBPrRfGFGebJvZa58DymCNg
+yGtAU6AOz4veavNmI2+GIDQsY66+tYDvZ+CxwzdYu+HDV9HmrJfc6deM0mnBn7SR
+jqzxJPgoTQhihTav6q/R5/2p5NvQ/H84OgS6GjosfGc2duUDzCP/kheMRKfzuyKC
+OHQPtJuIj8++gfpHtEU7IDUX1So3c9n0PdpeBvclsDbpRnCNxQWU4mBot4kBIgQQ
+AQIADAUCToi2GQUDABJ1AAAKCRCXELibyletfLZAB/9oRqx+NC98UQD/wlxCRytz
+vi/MuPnbgQUPLHEap10tvEi33S/H/xDR/tcGofY4cjAvo5skZXXeWq93Av7PACUb
+zkg0X0eSr2oL6wy66xfov72AwSuX+iUK68qtKaLqRLitM02y8aNRV/ggKvt7UMvG
+mOvs5yLaYlobyvGaFC2ClfkNOt2MlVnQZCmnYBCwOktPGkExiu2yZMifcYGxQcpH
+KVFG59KeF2cM2d4xYM8HJqkSGGW306LFVSyeRwG+wbttgLpD5bM/T2b3fF/J35ra
+CSMLZearRTq8aygPl+XM7MM2eR946aw6jmOsgNBErbvvIdQj6LudAZj+8imcXV2K
+iQEiBBABAgAMBQJOmdnRBQMAEnUAAAoJEJcQuJvKV618AvIIAIEF1ZJ+Ry7WOdKF
+5oeQ/ynaYUigzN92fW/9zB8yuQlngkFJGidYMbci1tR1siziIVJFusR3ZonqAPGK
+/SUta9Y6KWLhmc7c5UnEHklq/NfdMZ2WVSIykXlctqw0sbb+z1ecEd4G8u9j5ill
+MO1B36rQayYAPoeXLX8dY4VyFLVGaQ00rWQBYFZrpw16ATWbWGJP332NSfCk4zZq
+6kXEW07q0st3YBgAAGdNQyEeZCa4d4pBRSX6189Kjg6GDnIcaiOF6HO6PLr9fRlL
+r5ObCgU+G9gEhfiVwDEV9E+7/Bq2pYZ9whhkBqWQzdpXTNTM24uaEhE01EPO5zeC
+O214q6mJASIEEAECAAwFAk6rpgEFAwASdQAACgkQlxC4m8pXrXzAhwf/f9O99z16
+3Y5FZVIxexyqXQ/Mct9uKHuXEVnRFYbA49dQLD4S73N+zN7gn9jFeQcBo4w8qVUV
+94U/ta/VbLkdtNREyplPM4XY8YE5Wfd9bfyg3q1PbEiVjk995sBF+2+To99YYKst
+gXPqjlH0jUfEyDmexOj+hsp8Rc63kvkIx36VBa4ONRYFefGAhKDMigL2YAhc1UkG
+tkGTuLmlCGwIV6lviDZD3RJf5375VFnaHv7eXfwQxCwE+BxG3CURrjfxjaxMTmMP
+yAG2rhDp5oTUEvqDYNbko5UxYOmrSjvF4FzXwqerElXJUkUzSh0pp7RxHB/1lCxD
+s7D1F1hlgFQuNIkBIgQQAQIADAUCTrzZHAUDABJ1AAAKCRCXELibyletfMUpB/4s
+07dREULIBnA1D6qr3fHsQJNZqbAuyDlvgGGLWzoyEDs+1JMFFlaa+EeLIo1386GU
+2DammDC23p3IB79uQhJeD2Z1TcVg4cA64SfF/CHca5coeRSrdAiudzU/cgLGtXIP
+/OaFamXgdMxAhloLFbSHPCZkyb00phVa8+xeIVDrK1HByZsNIXy/SSK8U26S2PVZ
+2o14fWvKbJ1Aga8N6DuWY/D8P2mi3RAbiuZgfzkmKL5idH/wSKfnFKdTgJzssdCc
+1jZEGVk5rFYcWOrJARHeP/tsnb/UxKBEsNtO7e3N2e/rLVnEykVIO066hz7xZK/V
+NBSpx3k3qj4XPK41IHy2iQEiBBABAgAMBQJOzqO8BQMAEnUAAAoJEJcQuJvKV618
+2twH/0IzjXLxN45nvIfEjC75a+i9ZSLlqR8lsHL4GpEScFKI0a0lT4IVAIY2RKG+
+MAs2eHm0UfKuwGs5jluRZ9RqKrc61sY0XQV9/7znY9Db16ghX04JjknOKs/fPi87
+rvKkB/QxJWS8qbb/erRmW+cPNjbRxTFPS5JIwFWHA16ieFEpvdAgKV6nfvJVTq1r
+jPDcnIA9CJN2SmUFx9Qx3SRc6ITbam1hjFnY6sCh6AUhxLI2f1mq1xH9PqEy42Um
+68prRqTyJ7Iox1g/UDDkeeUcAg7T1viTz7uXpS3Wrq4zzo4yOpaJfLDR3pI5g2Zk
+SNGTMo6aySE4OABt8i1Pc1Pm6AmJASIEEAECAAwFAk7yPFYFAwASdQAACgkQlxC4
+m8pXrXzXiAf9FrXe0lgcPM+tYOWMLhv5gXJi2VUBaLxpyRXm/kJcmxInKq1GCd3y
+D4/FLHNu3ZcCz/uklPAbZXWI0O6ewq0LWsRtklmJjWiedH+hGyaTv95VklojRIBd
+8nBaJ6M98rljMBHTFwWvjQFVf4FLRJQZqHlvjcCkq2Dd9BWJpGXvr/gpKkmMJYNK
+/ftfZRcChb35NI19WRpOhj9u808OPcqKVvZBcPwFGV5cEBzmAC94J7JcD8+S8Ik8
+iUJMQGGL3QcmZOBozovh86hj7KTSEBHlLXl832z89H1hLeuLbnXoGLv3zeUFSxkv
+1h35LhZLqIMDQRXLuUzxGHMBpLhPyGWRJ4kBIgQQAQIADAUCTwQJFwUDABJ1AAAK
+CRCXELibyletfABvB/9Cy69cjOqLGywITs3Cpg//40jmdhSAVxilJivP6J5bubFH
+DJlVTx541Dv5h4hTG2BQuueQ4q1VCpSGW+rHcdhPyvmZGRz1rxdQQGh1Dv0Bod2c
+3PJVSYPSrRSwCZJkJHOtVRBdjK4mkZb5aFTza+Tor9kxzj4FcXVd4KAS+hHQHYHc
+Ar8tt2eOLzqdEFTULeGiSoNn+PVzvzdfhndphK+8F2jfQ2UKuc01O7k0Yn9xZVx0
+OG6fE1gStzLv7C5amWLRd8+xh+MN0G8MgNglpBoExsEMMlPBYSUHa6lxpdMNMuib
+rIyVncE9X8QOhImt8K0sNn/EdbuldJNGYbDLt7O4iQEiBBABAgAMBQJPFdTcBQMA
+EnUAAAoJEJcQuJvKV6184owH+wZ/uLpezXnSxigeH1sig72QEXMrNd5DVHCJdig3
+bo+K5YmmN710/m5z+63XKUEWpd6/knajObgckThzWftNeK1SSFQGPmoYZP9EZnSU
+7L+/dSUpExbj842G5LYagrCyMGtlxRywWEmbi72TKS/JOK0jLiOdvVy+PHrZSu0D
+TVQ7cJh1BmPsbz7zzxjmcI5l+7B7K7RHZHq45nDLoIabwDacj7BXvBK0Ajqz4QyJ
+GQUjXC7q+88I+ptPvOXlE5nI/NbiCJOMI6d/bWN1KwYrC80fZuFaznfQFcPyUaDw
+yRaun+K3kEji2wXecq+yMmLUEp01TKsUeOL50HD6hHH07W+JASIEEAECAAwFAk85
+bQsFAwASdQAACgkQlxC4m8pXrXwKPQgAlkbUsTr7nkq+haOk0jKpaHWEbRMEGMrB
+I3F7E+RDO6V/8y4Jtn04EYDc8GgZMBah+mOgeINq3y8jRMYV5jVtZXv2MWYFUcjM
+kVBKeqhi/pGEjmUdmdt3DlPv3Z+fMTMRmAocI981iY/go8PVPg/+nrR6cFK2xxnO
+R8TacikJBFeSfkkORg1tDzjjYv1B5ZIEkpplepl5ahJBBq7cpYhTdY6Yk0Sz0J8w
+EdffLSaNxrRuWLrRhWzZU7p9bFzfb/7OHc21dJnB7wKv5VvtgE+jiQw9tOKaf5hc
+SgRYuF6heu+B25gc5Uu88lo409mZ7oxQ6hDCn7JHvzh0rhmSN+Kid4kBIgQQAQIA
+DAUCT0qQrQUDABJ1AAAKCRCXELibyletfC9UB/4o2ggJYM0CLxEpP0GU8UKOh3+/
+zm1DN7Qe4kY2iCtF1plKHQaTgt5FlgRCFaiXcVv7WzGz/FnmxonR1leLl+kfRlwy
+PPnoI/AWPCy/NO4Cl5KnjsSmsdDUpObwZ4KYsdilZR7ViJu2swdAIgnXBUwrlRJR
+7CK4TAKrTeonRgVSrVx8Vt//8/cYj73CLq8oY/KK0iHiQrSwo44uyhdiFIAssjyX
+n6/2E+w0zgvPexNSNNROHQ8pjbq+NTY6GwKIGsaej3UTRwQ7psvKXz8y7xdzmOAr
+/khGvxB5gjkx02pimjeia8v66aH6rbnojJMAovNUS4EHdHnulv4rovC8Kf9iiQEi
+BBABAgAMBQJPVdsaBQMAEnUAAAoJEJcQuJvKV618vVEIALFXPBzcAO1SnQarBLzy
+YMVZZumPvSXKnUHAO+6kjApXPJ+qFRdUaSNshZxVKY9Zryblu4ol/fLUTt0CliSD
+IxD6L4GXEm4VYYCl4lPO3bVsJnGITLFwQGHM27EmjVoTiD8Ch7kPq2EXr3dMRgzj
+pdz+6aHGSUfOdLTPXufDvW83bEWGaRVuTJKw+wIrcuRqQ+ucWJgJGwcE4zeHjZad
+Jx1XUm1X+BbI73uiQussyjhhQVVNU7QEdrjyuscaZ/H38wjUwNbylxDPB4I8quC1
+knQ0wSHr7gKpM+E9nhiS14poRqU18u78/sJ2MUPXnQA6533IC238/LP8JgqB+BiQ
+BTSJASIEEAECAAwFAk9ng3cFAwASdQAACgkQlxC4m8pXrXxQRAf/UZlkkpFJj1om
+9hIRz7gS+l7YvTaKSzpo+TBcx3C7aqKJpir6TlMK9cb9HGTHo2Xp1N3FtQL72NvO
+6CcJpBURbvSyb4i0hrm/YcbUC4Y3eajWhkRS3iVfGNFbc/rHthViz0r6Y5lhXX16
+aVkDv5CIFWaF3BiUK0FnHrZiy4FPacUXCwEjv3uf8MpxV5oEmo8Vs1h4TL3obyUz
+qrImFrEMYE/12lkE8iR5KWCaF8eFyl56HL3PPl90JMQBXzhwsFoWCPuwjfM5w6sW
+Ll//zynwxtlJ9CRz9c2vK6aJ8DRu3OfBKN1iiEcNEynksDnNXErn5xXKz3p5pYdq
+e9BLzUQCDYkBIgQQAQIADAUCT3inRgUDABJ1AAAKCRCXELibyletfGMKCADJ97qk
+geBntQ+tZtKSFyXznAugYQmbzJld8U6eGSQnQkM40Vd62UZLdA8MjlWKS8y4A4L2
+0cI14zs5tKG9Q72BxQOw5xkxlLASw1/8WeYEbw7ZA+sPG//q9v3kIkru3sv64mMA
+enZtxsykexRGyCumxLjzlAcL1drWJGUYE2Kl6uzQS7jb+3PNBloQvz6nb3YRZ+Cg
+Ly9D41SIK+fpnV8r4iqhu7r4LmAQ7Q1DF9aoGaYvn2+xLGyWHxJAUet4xkMNOLp6
+k9RF1nbNe4I/sqeCB25CZhCTEvHdjSGTD2yJR5jfoWkwO9w8DZG1Q9WrWqki4hSB
+l0cmcvO34pC1SJYziQEiBBABAgAMBQJPinQFBQMAEnUAAAoJEJcQuJvKV618CFEI
+AJp5BbcV7+JBMRSvkoUcAWDoJSP2ug9zGw5FB8J90PDefKWCKs5Tjayf2TvM5ntq
+5DE9SGaXbloIwa74FoZlgqlhMZ4AtY9Br+oyPJ5S844wpAmWMFc6NnEPFaHQkQ+b
+dJYpRVNd9lzagJP261P3S+S9T2UeHVdOJBgWIq9Mbs4lnZzWsnZfQ4Lsz0aPqe48
+tkU8hw+nflby994qIwNOlk/u+I/lJbNz5zDY91oscXTRl2jV1qBgKYwwCXxyB3j9
+fyVpRl+7QnqbTWcCICVFL+uuYpP0HjdoKNqhzEguAUQQLOB9msPTXfa2hG+32ZYg
+5pzI5V7GCHq0KO6u5Ctj3TGJASIEEAECAAwFAk+cQEEFAwASdQAACgkQlxC4m8pX
+rXzi7AgAx8wJzNdD7UlgdKmrAK//YqH7arSssb33Xf45sVHDpUVA454DXeBrZpi+
+zEuo03o5BhAuf38cwfbkV6jN1mC2N0FZfpy4v7RxHKLYr7tr6r+DRn1L1giX5ybx
+CgY0fLAxkwscWUKGKABWxkz9b/beEXaO2rMt+7DBUdpAOP5FNRQ8WLRWBcMGQiaT
+S4YcNDAiNkrSP8CMLQP+04hQjahxwCgBnksylciqz3Y5/MreybNnTOrdjVDsF0Oe
+t0uLOiWXUZV1FfaGIdb/oBQLg+e1B74p5+q3aF8YI97qAZpPa1qiQzWIDX8LX9QX
+EFyZ3mvqzGrxkFoocXleNPgWT8fRuokBIgQQAQIADAUCT64N/QUDABJ1AAAKCRCX
+ELibyletfDOGCACKfcjQlSxrWlEUrYYZpoBP7DE+YdlIGumt5l6vBmxmt/5OEhqr
++dWwuoiyC5tm9CvJbuZup8anWfFzTTJmPRPsmE4z7Ek+3CNMVM2wIynsLOt1pRFK
+4/5RNjRLbwI6EtoCQfpLcZJ//SB56sK4DoFKH28Ok4cplESPnoMqA3QafdSEA/FL
+qvZV/iPgtTz7vjQkMgrXAIUM4fvKe3iXkAExGXtmgdXHVFoKmHrxJ2DTSvM7/19z
+jGJeu2MhIKHyqEmCk6hLjxyCE5pAH59KlbAQOP1bS28xlRskBApm2wN+LOZWzC62
+HhEReQ50inCGuuubK0PqUQnyYc+lUFxrFpcliQEiBBABAgAMBQJPv9lVBQMAEnUA
+AAoJEJcQuJvKV618AzgH/iRFFCi4qjvoqji1fi7yNPZVOMMO2H13Ks+AfcjRtHuV
+aa30u50ND7TH+XQe6yerTapLh3aAm/sNP99aTxIuwRSlyKEoDs93+XVSgRqPBgbF
+/vxv0ykok3p6L9DxFO/w5cL8JrBhMZoJrEkIBFkwN8tWlcXPRFQvcdBYv3M3DTZU
+qY+UHnOxHvSzsl+LJ0S9Xcd9C5bvYfabmYJvG5eRS3pj1L/y3a6yw6hvY+JtnQAk
+t05TdeHMIgQH/zb8V9wxDzmE0un8LyoC2Jx5TpikQsJSejwK6b3coxVBlngku6+C
+qDAimObZLw6H9xYYIK0FoJs7j5bQZEwUO7OLBgjcMOqJASIEEAECAAwFAk/Rpc8F
+AwASdQAACgkQlxC4m8pXrXw49Qf/TdNbun2htQ+cRWarszOx8BLEiW/x6PVyUQpZ
+nV/0qvhKzlJUjM9hQPcA0AsOjhqtCN6Cy8KXbK/TvPm9D/Nk6HWwD1PomzrJVFk2
+ywGFIuTR+lluKSp7mzm5ym0wJs5cPq731Im31RUQU8ndjLrq9YOf5FVL8NqmcOAU
+4E8d68BbmVCQC5MMr0901FKwKznShfpy7VYN25/BASj8dhnynBYQErqToOJB6Cnd
+JhdTlbfR4SirqAYZZg3XeqGhByytEHE1x7FMWWFYhdNtsnAVhYBbWqAzBs8lF9Jd
+Mhaf0VQU/4z10gVrRtXLR/ixrCi+P4cM/fOQkqd6pwqWkaXt6okBIgQQAQIADAUC
+T+NxIAUDABJ1AAAKCRCXELibyletfFBBCAC6+0TUJDcNaqOxOG1KViY6KYg9NCL8
+pwNK+RKNK/N1V+WGJQH7qDMwRoOn3yogrHax4xIeOWiILrvHK0O6drS1DjsymIhR
+Sm2XbE/8pYmEbuJ9vHh3b/FTChmSAO7dDjSKdWD3dvaY8lSsuDDqPdTX8FzOfrXC
+M22C/YPg7oUG2A5svE1b+yismP4KmVNWAepEuPZcnEMPFgop3haHg9X2+mj/btDB
+Yr6p9kAgIY17nigtNTNjtI0dMLu43aIzedCYHqOlNHiB049jkJs54fMGBjF9qPtc
+m0k44xyKd1/JXWMdNUmtwKsChAXJS3YOciMgIx6tqYUTndrP4I6q1rfriQEiBBAB
+AgAMBQJP9T1VBQMAEnUAAAoJEJcQuJvKV618J9wIAI1lId9SMbEHF6PKXRe154lE
+pap5imMU/lGTj+9ZcXmlf8o2PoMMmb3/E1k+EZUaeSBoOmjS8C2gwd5XFwRrlwAD
+RlK/pG5XsL4h5wmN2fj1ororrJXvqH427PLRQK9yzdwG4+9HTBOxjoS8qZT9plyK
+AJZzAydAMqyseRHgNo0vMwlgrs4ojo+GcFGQHrF3IaUjvVfUPOmIj7afopFdIZmI
+GaSF0TXBzqcZ1chFv/eTBcIuIKRvlaDee5FgV7+nLH2nKOARCLvV/+8uDi2zbr83
+Ip5x2tD3XuUZ0ZWxD0AQWcrLdmGb4lkxbGxvCtsaJHaLXWQ2m760RjIUcwVMEBKJ
+ASIEEAECAAwFAlAGYWsFAwASdQAACgkQlxC4m8pXrXwyVAgAvuvEl6yuGkniWOlv
+uHEusUv/+2GCBg6qV+IEpVtbTCCgiFjYR5GasSp1gpZ5r4BocOlbGdjdJGHTpyK8
+xD1i+6qZWUYhNRg2POXUVzcNEl2hhouwPLOifcmTwAKU76TEv3L5STviL3hWgUR2
+yEUZ3Ut0IGVV6uPER9jpR3qd6O3PeuFkwf+NaGTye4jioLAy3aYwtZCUXzvYmNLP
+90K4y+5yauZteLmNeq26miKC/NQu4snNFClPbGRjHD1ex9KDiAMttOgN4WEq7srT
+rYgtT531WY4deHpNgoPlHPuAfC0H+S6YWuMbgfcb6dV+Rrd8Ij6zM3B/PcjmsYUf
+OPdPtIkBIgQQAQIADAUCUBgtfQUDABJ1AAAKCRCXELibyletfAm3CACQlw21Lfeg
+d8RmIITsfnFG/sfM3MvZcjVfEAtsY3fTK9NiyU0B3yX0PU3ei37qEW+50BzqiStf
+5VhNvLfbZR+yPou7o2MAP31mq3Uc6grpTV64BRIkCmRWg40WMjNI1hv7AN/0atgj
+ATYQXgnEw7mfFb0XZtMTD6cmrz/A9nTPVgZDxzopOMgCCC1ZK4Vpq9FKdCYUaHpX
+3sqnDf+gpVIHkTCMgWLYQOeX5Nl+fgnq6JppaQ3ySZRUDr+uFUs0uvDRvI/cn+ur
+ri92wdDnczjFumKvz/cLJAg5TG2Jv1Jx3wecALsVqQ3gL7f7vr1OMaqhI5FEBqdN
+29L9cZe/ZmkriQEiBBIBCgAMBQJVoNxyBYMHhh+AAAoJEEoz7NUmyPxLD1EH/2eh
+7a4+8A1lPLy2L9xcNt2bifLfFP2pEjcG6ulBoMKpHvuTCgtX6ZPdHpM7uUOje/F1
+CCN0IPB533U1NIoWIKndwNUJjughtoRM+caMUdYyc4kQm29Se6hMPDfyswXE5Bwe
+PmoOm4xWPVOH/cVN04zyLuxdlQZNQF/nJg6PMsz4w5z+K6NGGm24NEPcc72iv+6R
+Uc/ry/7v5cVu4hO5+r104mmNV5yLecQF13cHy2JlngIHXPSlxTZbeJX7qqxE7TQh
+5nviSPgdk89oB5jFSx4g1efXiwtLlP7lbDlxHduomyQuH9yqmPZMbkJt9uZDc8Zz
+MYsDDwlc7BIe5bGKfjqJAhwEEAECAAYFAlSanFIACgkQdzHqU52lcqLdvg//cAEP
+qdN5VTKWEoDFjDS4I6t8+0KzdDWDacVFwKJ8RAo1M2SklDxnIvnzysZd2VHp5Pq7
+i4LYCZo5lDkertQ6LwaQxc4X6myKY4LTA652ObFqsSfgh9kW+aJBBAyeahPQ8CDD
++Yl23+MY5wTsj4qt7KffNzy78vLbYnVnvRQ3/CboVix0SRzg0I3Oi7n3B0lihvXy
+5goy9ikjzZevejMEfjfeRCgoryy9j5RvHH9PF3fJVtUtHCS4f+kxLmbQJ1XqNDVD
+hlFzjz8oUzz/8YXy3im5MY7Zuq4P4wWiI7rkIFMjTYSpz/evxkVlkR74qOngT2pY
+VHLyJkqwh56i0aXcjMZiuu2cymUt2LB9IsaMyWBNJjXr2doRGMAfjuR5ZaittmML
+yZwix9mWVk7tkwlIxmT/IW6Np0qMhDZcWYqPRpf7+MqY3ZYMK4552b8aDMjhXrnO
+OwLsz+UI4bZa1r9dguIWIt2C2b5C1RQ9AsQBPwg7h5P+HhRuFAuDKK+vgV8FRuzR
+JeKkFqwB4y0Nv7BzKbFKmP+V+/krRv+/Dyz9Bz/jyAQgw02u1tPupH9BGhlRyluN
+yCJFTSNj7G+OLU0/l4XNph5OOC7sy+AMZcsL/gsT/TXCizRcCuApNTPDaenACpbv
+g8OoIzmNWhh4LXbAUHCKmY//hEw9PvTZA1xKHgyJAhwEEgECAAYFAlJYsKQACgkQ
+oirk60MpxUV2XQ//b2/uvThkkbeOegusDC4AZfjnL/V3mgk4iYy4AC9hum0R9oNl
+XDR51P1TEw9mC1btHj+7m7Iq1a5ke5wIC7ENZiilr0yPqeWgL5+LC98dz/L85hqA
+wIoGeOfMhrlaVbAZEj4yQTAJDA35vZHVsQmp87il0m+fZX04OBLXBzw86EoAAZ7Q
+EoH4qFcT9k1T363tvNnIm3mEvkQ5WjE1R9uchJa1g7hdlNQlVkjFmPZrJK9fl4z5
+6Dto89Po4Sge48jDH0pias4HATYHsxW819nz5jZzGcxLnFRRR5iITVZi9qzsHP7N
+bUh3qxuWCHS9xziXpOcSZY848xXw63Y5jDJfpzupzu/KHj6CzXYJUEEqp9MluoGb
+/BCCEPzdZ0ovyxFutM/BRcc6DvE6sTDF/UES21ROqfuwtJ6qJYWX+lBIgyCJvj4o
+RdbzxUleePuzqCzmwrIXtoOKW0Rlj4SCeF9yCwUMBTGW5/nCLmN4dwf1KW2RP2Eg
+4ERbuUy7QnwRP5UCl+0ISZJyYUISfg8fmPIdQsetUK9Cj+Q5jpB2GXwELXWnIK6h
+K/6jXp+EGEXSqdIE53vAFe7LwfHiP/D5M71D2h62sdIOmUm3lm7xMOnM5tKlBiV+
+4jJSUmriCT62zo710+6iLGqmUUYlEll6Ppvo8yuanXkYRCFJpSSP7VP0bBqIZgQT
+EQIAJgUCTnc9dgIbIwUJEPPzpwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEIxx
+jTtQcuH1Ut4AoIKjhdf70899d+7JFq3LD7zeeyI0AJ9Z+YyE1HZSnzYi73brScil
+bIV6sbQ7TXlTUUwgUGFja2FnZSBzaWduaW5nIGtleSAod3d3Lm15c3FsLmNvbSkg
+PGJ1aWxkQG15c3FsLmNvbT6IbwQwEQIALwUCTnc9rSgdIGJ1aWxkQG15c3FsLmNv
+bSB3aWxsIHN0b3Agd29ya2luZyBzb29uAAoJEIxxjTtQcuH1tT0An3EMrSjEkUv2
+9OX05JkLiVfQr0DPAJwKtL1ycnLPv15pGMvSzav8JyWN3IhlBBMRAgAdBQJHrJS0
+BQkNMFioBQsHCgMEAxUDAgMWAgECF4AAEgkQjHGNO1By4fUHZUdQRwABAa6SAJ9/
+PgZQSPNeQ6LvVVzCALEBJOBt7QCffgs+vWP18JutdZc7XiawgAN9vmmITAQTEQIA
+DAUCPj6j0QWDCWYAuwAKCRBJUOEqsnKR8iThAJ9ZsR4o37dNGyl77nEqP6RAlJqa
+YgCeNTPTEVY+VXHR/yjfyo0bVurRxT2ITAQTEQIADAUCPkKCAwWDCWIiiQAKCRC2
+9c1NxrokP5aRAKCIaaegaMyiPKenmmm8xeTJSR+fKQCgrv0TqHyvCRINmi6LPucx
+GKwfy7KIRgQQEQIABgUCP6zjrwAKCRCvxSNIeIN0D/aWAKDbUiEgwwAFNh2n8gGJ
+Sw/8lAuISgCdHMzLAS26NDP8T2iejsfUOR5sNriIRgQQEQIABgUCP7RDdwAKCRCF
+lq+rMHNOZsbDAJ0WoPV+tWILtZG3wYqg5LuHM03faQCeKuVvCmdPtro06xDzeeTX
+VrZ14+GIRgQQEQIABgUCQ1uz6gAKCRCL2C5vMLlLXH90AJ0QsqhdAqTAk3SBnO2w
+zuSOwiDIUwCdFExsdDtXf1cL3Q4ilo+OTdrTW2CIRgQTEQIABgUCRPEzJgAKCRD2
+ScT0YJNTDApxAKCJtqT9LCHFYfWKNGGBgKjka0zi9wCcCG3MvnvBzDUqDVebudUZ
+61Sont+ITAQQEQIADAUCQYHLAQWDBiLZiwAKCRAYWdAfZ3uh7EKNAJwPywk0Nz+Z
+Lybw4YNQ7H1UxZycaQCePVhY4P5CHGjeYj9SX2gQCE2SNx+ITAQQEQIADAUCQYHL
+NAWDBiLZWAAKCRCBwvfr4hO2kiIjAJ0VU1VQHzF7yYVeg+bh31nng9OOkwCeJI8D
+9mx8neg4wspqvgXRA8+t2saITAQQEQIADAUCQYHLYgWDBiLZKgAKCRBrcOzZXcP0
+cwmqAJsFjOvkY9c5eA/zyMrOZ1uPB6pd4QCdGyzgbYb/eoPu6FMvVI9PVIeNZReI
+TAQQEQIADAUCQdCTJAWDBdQRaAAKCRB9JcoKwSmnwmJVAKCG9a+Q+qjCzDzDtZKx
+5NzDW1+W+QCeL68seX8OoiXLQuRlifmPMrV2m9+ITAQQEQIADAUCQitbugWDBXlI
+0gAKCRDmG6SJFeu5q/MTAKCTMvlCQtLKlzD0sYdwVLHXJrRUvgCffmdeS6aDpwIn
+U0/yvYjg1xlYiuqITAQSEQIADAUCQCpZOgWDB3pLUgAKCRA8oR80lPr4YSZcAJwP
+4DncDk4YzvDvnRbXW6SriJn1yQCdEy+d0CqfdhM7HGUs+PZQ9mJKBKqITAQSEQIA
+DAUCQD36ugWDB2ap0gAKCRDy11xj45xlnLLfAKC0NzCVqrbTDRw25cUss14RRoUV
+PACeLpEc3zSahJUB0NNGTNlpwlTczlCITAQSEQIADAUCQQ4KhAWDBpaaCAAKCRA5
+yiv0PWqKX/zdAJ4hNn3AijtcAyMLrLhlZQvib551mwCgw6FEhGLjZ+as0W681luc
+wZ6PzW+ITAQSEQIADAUCQoClNAWDBSP/WAAKCRAEDcCFfIOfqOMkAJwPUDhS1eTz
+gnXclDKgf353LbjvXgCeLCWyyj/2d0gIk6SqzaPl2UcWrqiITAQTEQIADAUCPk1N
+hAWDCVdXCAAKCRAtu3a/rdTJMwUMAKCVPkbk1Up/kyPrlsVKU/Nv3bOTZACfW5za
+HX38jDCuxsjIr/084n4kw/uITAQTEQIADAUCQdeAdgWDBc0kFgAKCRBm79vIzYL9
+Pj+8AJ9d7rvGJIcHzTCSYVnaStv6jP+AEACeNHa5yltqieRBCCcLcacGqYK81omI
+TAQTEQIADAUCQhiBDgWDBYwjfgAKCRB2wQMcojFuoaDuAJ9CLYdysef7IsW42UfW
+hI6HjxkzSgCfeEpXS4hEmmGicdpRiJQ/W21aB0GIZQQTEQIAHQULBwoDBAMVAwID
+FgIBAheABQJLcC/KBQkQ8/OnABIHZUdQRwABAQkQjHGNO1By4fWw2wCeJilgEarL
+8eEyfDdYTyRdqE45HkoAnjFSZY8Zg/iXeErHI0r04BRukNVgiHsEMBECADsFAkJ3
+NfU0HQBPb3BzLi4uIHNob3VsZCBoYXZlIGJlZW4gbG9jYWwhIEknbSAqc28qIHN0
+dXBpZC4uLgAKCRA5yiv0PWqKX+9HAJ0WjTx/rqgouK4QCrOV/2IOU+jMQQCfYSC8
+JgsIIeN8aiyuStTdYrk0VWCIjwQwEQIATwUCRW8Av0gdAFNob3VsZCBoYXZlIGJl
+ZW4gYSBsb2NhbCBzaWduYXR1cmUsIG9yIHNvbWV0aGluZyAtIFdURiB3YXMgSSB0
+aGlua2luZz8ACgkQOcor9D1qil+g+wCfcFWoo5qUl4XTE9K8tH3Q+xGWeYYAnjii
+KxjtOXc0ls+BlqXxbfZ9uqBsiQIiBBABAgAMBQJBgcuFBYMGItkHAAoJEKrj5s5m
+oURoqC8QAIISudocbJRhrTAROOPoMsReyp46Jdp3iL1oFDGcPfkZSBwWh8L+cJjh
+dycIwwSeZ1D2h9S5Tc4EnoE0khsS6wBpuAuih5s//coRqIIiLKEdhTmNqulkCH5m
+imCzc5zXWZDW0hpLr2InGsZMuh2QCwAkB4RTBM+r18cUXMLV4YHKyjIVaDhsiPP/
+MKUj6rJNsUDmDq1GiJdOjySjtCFjYADlQYSD7zcd1vpqQLThnZBESvEoCqumEfOP
+xemNU6xAB0CL+pUpB40pE6Un6Krr5h6yZxYZ/N5vzt0Y3B5UUMkgYDSpjbulNvaU
+TFiOxEU3gJvXc1+h0BsxM7FwBZnuMA8LEA+UdQb76YcyuFBcROhmcEUTiducLu84
+E2BZ2NSBdymRQKSinhvXsEWlH6Txm1gtJLynYsvPi4B4JxKbb+awnFPusL8W+gfz
+jbygeKdyqzYgKj3M79R3geaY7Q75Kxl1UogiOKcbI5VZvg47OQCWeeERnejqEAdx
+EQiwGA/ARhVOP/1l0LQA7jg2P1xTtrBqqC2ufDB+v+jhXaCXxstKSW1lTbv/b0d6
+454UaOUV7RisN39pE2zFvJvY7bwfiwbUJVmYLm4rWJAEOJLIDtDRtt2h8JahDObm
+3CWkpadjw57S5v1c/mn+xV9yTgVx5YUfC/788L1HNKXfeVDq8zbAiQIiBBMBAgAM
+BQJCnwocBYMFBZpwAAoJENjCCglaJFfPIT4P/25zvPp8ixqV85igs3rRqMBtBsj+
+5EoEW6DJnlGhoi26yf1nasC2frVasWG7i4JIm0U3WfLZERGDjR/nqlOCEqsP5gS3
+43N7r4UpDkBsYh0WxH/ZtST5llFK3zd7XgtxvqKL98l/OSgijH2W2SJ9DGpjtO+T
+iegq7igtJzw7Vax9z/LQH2xhRQKZR9yernwMSYaJ72i9SyWbK3k0+e95fGnlR5pF
+zlGq320rYHgD7v9yoQ2t1klsAxK6e3b7Z+RiJG6cAU8o8F0kGxjWzF4v8D1op7S+
+IoRdB0Bap01ko0KLyt3+g4/33/2UxsW50BtfqcvYNJvU4bZns1YSqAgDOOanBhg8
+Ip5XPlDxH6J/3997n5JNj/nk5ojfd8nYfe/5TjflWNiput6tZ7frEki1wl6pTNbv
+V9C1eLUJMSXfDZyHtUXmiP9DKNpsucCUeBKWRKLqnsHLkLYydsIeUJ8+ciKc+EWh
+FxEY+Ml72cXAaz5BuW9L8KHNzZZfez/ZJabiARQpFfjOwAnmhzJ9r++TEKRLEr96
+taUI9/8nVPvT6LnBpcM38Td6dJ639YvuH3ilAqmPPw50YvglIEe4BUYD5r52Seqc
+8XQowouGOuBX4vs7zgWFuYA/s9ebfGaIw+uJd/56Xl9ll6q5CghqB/yt1EceFEnF
+CAjQc2SeRo6qzx22iEYEEBECAAYFAkSAbycACgkQCywYeUxD5vWDcACfQsVk/XGi
+ITFyFVQ3IR/3Wt7zqBMAoNhso/cX8VUfs2BzxPvvGS3y+5Q9iEYEEBECAAYFAkUw
+ntcACgkQOI4l6LNBlYkyFgCbBcw5gIii0RTDJsdNiuJDcu/NPqEAniSq9iTaLjgF
+HZbaizUU8arsVCB5iEYEEBECAAYFAkWho2sACgkQu9u2hBuwKr6bjwCfa7ZK6O+X
+mT08Sysg4DEoZnK4L9UAoLWgHuYg35wbZYx+ZUTh98diGU/miF0EExECAB0FAj4+
+owwFCQlmAYAFCwcKAwQDFQMCAxYCAQIXgAAKCRCMcY07UHLh9XGOAJ4pVME15/DG
+rUDohtGv2z8a7yv4AgCeKIp0jWUWE525QocBWms7ezxd6syIXQQTEQIAHQUCR6yU
+zwUJDTBYqAULBwoDBAMVAwIDFgIBAheAAAoJEIxxjTtQcuH1dCoAoLC6RtsD9K3N
+7NOxcp3PYOzH2oqzAKCFHn0jSqxk7E8by3sh+Ay8yVv0BYhdBBMRAgAdBQsHCgME
+AxUDAgMWAgECF4AFAkequSEFCQ0ufRUACgkQjHGNO1By4fUdtwCfRNcueXikBMy7
+tE2BbfwEyTLBTFAAnifQGbkmcARVS7nqauGhe1ED/vdgiF0EExECAB0FCwcKAwQD
+FQMCAxYCAQIXgAUCS3AuZQUJEPPyWQAKCRCMcY07UHLh9aA+AKCHDkOBKBrGb8tO
+g9BIub3LFhMvHQCeIOOot1hHHUlsTIXAUrD8+ubIeZaJARwEEgECAAYFAkvCIgMA
+CgkQ3PTrHsNvDi8eQgf/dSx0R9Klozz8iK79w00NOsdoJY0Na0NTFmTbqHg30XJo
+G62cXYgc3+TJnd+pYhYi5gyBixF/L8k/kPVPzX9W0YfwChZDsfTw0iDVmGxOswiN
+jzSo0lhWq86/nEL30Khl9AhCC1XFNRw8WZYq9Z1qUXHHJ2rDARaedvpKHOjzRY0N
+dx6R2zNyHDx2mlfCQ9wDchWEuJdAv0uHrQ0HV9+xq7lW/Q3L/V5AuU0tiowyAbBL
+PPYrB6x9vt2ZcXS7BOy8SfQ1i8W2QDQ/Toork4YwBiv6WCW/ociy7paAoPOWV/Nf
+2S6hDispeecbk7wqpbUj5klDmwrlgB/jmoAXWEnbsYkBIgQQAQIADAUCSSpooAUD
+ABJ1AAAKCRCXELibyletfFOMCACpP+OVZ7lH/cNY+373c4FnSI0/S5PXS0ABgdd4
+BFWRFWKrWBeXBGc8sZfHOzVEwkzV96iyHbpddeAOAkEA4OVPW1MMFCmlHxi2s9/N
+JrSrTPVfQOH5fR9hn7Hbpq/ETw0IoX1FKo7vndMnHZnFEnI+PDXLcdMYQgljYzhT
+xER4vYY0UKu8ekSshUy4zOX7XSJxwqPUvps8qs/TvojIF+vDJvgFYHVkgvS+shp8
+Oh/exg9vKETBlgU87Jgsqn/SN2LrR/Jhl0aLd0G0iQ+/wHmVYdQUMFaCZwk/BKNa
+XPzmGZEUZ3RNbYa19Mo7hcE3js76nh5YMxFvxbTggVu4kdFkiQEiBBABAgAMBQJK
+M06IBQMAEnUAAAoJEJcQuJvKV618F4gH/innejIHffGMk8jYix4ZZT7pW6ApyoI+
+N9Iy85H4L+8rVQrtcTHyq0VkcN3wPSwtfZszUF/0qP6P8sLJNJ1BtrHxLORYjJPm
+gveeyHPzA2oJl6imqWUTiW822fyjY/azwhvZFzxmvbFJ+r5N/Z57+Ia4t9LTSqTN
+HzMUYaXKDaAqzZeK7P0E6XUaaeygbjWjBLQ1O0ezozAy+Kk/gXApmDCGFuHSFe7Z
+mgtFcbXLM2XFQpMUooETD2R8MUsd+xnQsff/k6pQOLxi+jUEsWSr/iqmvlk6gZ4D
+pemBjuhcXYlxJYjUaX9Zmn5s+ofF4GFxRqXoY7l9Z+tCM9AX37lm6S+JASIEEAEC
+AAwFAkpEcgoFAwASdQAACgkQlxC4m8pXrXz2mgf/RQkpmMM+5r8znx2TpRAGHi5w
+ktvdFxlvPaOBWE28NDwTrpcoMqo9kzAiuvEQjVNihbP21wR3kvnQ84rTAH0mlC2I
+uyybggpqwzOUl+Wi0o+vk8ZA0A0dStWRN8uqneCsd1XnqDe1rvqC4/9yY223tLmA
+kPvz54ka2vX9GdJ3kxMWewhrVQSLCktQpygU0dujGTDqJtnk0WcBhVF9T87lv3W2
+eGdPielzHU5trXezmGFj21d56G5ZFK8co7RrTt4qdznt80glh1BTGmhLlzjMPLTe
+dcMusm3D1QB9ITogcG94ghSf9tEKmmRJ6OnnWM5Kn9KcL63E5oj2/lY9H54wSYkB
+IgQQAQIADAUCSlY+RwUDABJ1AAAKCRCXELibyletfOOQB/0dyJBiBjgf+8d3yNID
+pDktLhZYw8crIjPBVdOgX12xaUYBTGcQITRVHSggzffDA5BQXeUuWhpL4QB0uz1c
+EPPwSMiWiXlBtwF5q6RVf3PZGJ9fmFuTkPRO7SruZeVDo9WP8HjbQtOLukYf566e
+grzAYR9p74UgWftpDtmrqrRTobiuvsFBxosbeRCvEQCrN0n+p5D9hCVB88tUPHnO
+WA4mlduAFZDxQWTApKQ92frHiBqy+M1JFezz2OM3fYN+Dqo/Cb7ZwOAA/2dbwS7o
+y4sXEHbfWonjskgPQwFYB23tsFUuM4uZwVEbJg+bveglDsDStbDlfgArXSL/0+ak
+lFcHiQEiBBABAgAMBQJKaAqEBQMAEnUAAAoJEJcQuJvKV618rH0H/iCciD4U6YZN
+JBj0GN7/Xt851t9FWocmcaC+qtuXnkFhplXkxZVOCU4VBMs4GBoqfIvagbBTyfV4
+Di+W8Uxr+/1jiu3l/HvoFxwdwNkGG6zNBhWSjdwQpGwPvh5ryV1OfLX/mgQgdDmx
+vqz5+kFDUj4m7uLaeuU2j1T0lR4zU0yAsbt7J3hwfqJCXHOc9bm5nvJwMrSm+sdC
+TP5HjUlwHr9mTe8xuZvj6sO/w0P4AqIMxjC9W7pT9q0ofG2KSTwt7wFbh05sbG4U
+QYOJe4+Soh3+KjAa1c0cvmIh4cKX9qfCWwhhdeNfh1A9VTHhnl5zTv/UjvnQtjhl
+H/Fq1eBSKcSJASIEEAECAAwFAkp5LgoFAwASdQAACgkQlxC4m8pXrXwY6wgAg3f8
+76L3qDZTYlFAWs3pXBl8GsUr1DEkTlEDZMZKDM3wPmhaWBR1hMA3y6p3aaCUyJIJ
+BEneXzgyU9uqCxXpC78d5qc3xs/Jd/SswzNYuvuzLYOw5wN5L31SLmQTQ8KqE0uo
+RynBmtDCQ4M2UKifSnv+0+3mPh85LVAS481GNpL+VVfCYtKesWNu40+98Yg6L9NG
+WwRTfsQbcdokZo44Jz7Y7f81ObC4r/X1DgPj2+d4AU/plzDcdrbINOyprs+7340e
+cnaGO4Lsgd19b1CvcgJgltRquu3kRvd+Ero2RYpDv6GVK8Ea0Lto4+b/Ae8cLXAh
+QnaWQCEWmw+AU4Jbz4kBIgQQAQIADAUCSo5fvQUDABJ1AAAKCRCXELibyletfA08
+B/9w8yJdc8K+k07U30wR/RUg3Yb2lBDygmy091mVsyB0RGixBDXEPOXBqGKAXiV1
+QSMAXM2VKRsuKahY2HFkPbyhZtjbdTa7Pr/bSnPvRhAh9GNWvvRg2Kp3qXDdjv9x
+ywEghKVxcEIVXtNRvpbqRoKmHzIExvUQck5DM1VwfREeYIoxgs4035WADhVMdngQ
+S2Gt8P2WaU/p8EZhFGg6X8KtOlD68zGboaJe0hj2VDc+Jc+KdjRfE3fW5IToid/o
+DkUaIW6tB3WkXb0g6D/2hrEJbX3headChHKSB8eQdOR9bcCJDhhU8csd501qmrhC
+ctmvlpeWQZdIQdk6sABPWeeCiQEiBBABAgAMBQJKoBJHBQMAEnUAAAoJEJcQuJvK
+V618Ml8H/1D88/g/p9fSVor4Wu5WlMbg8zEAik3BIxQruEFWda6nART6M9E7e+P1
+++UHZsWYs6l9ROpWxRLG1Yy9jLec2Y3nUtb20m65p+IVeKR2a9PHW35WZDV9dOYP
+GZabKkO1clLeWLVgp9LRjZ+AeRG+ljHqsULXro1dwewLTB/gg9I2vgNv6dKxyKak
+nM/GrqZLATAq2KoaE/u/6lzRFZIzZnLtjZh8X7+nS+V8v9IiY4ntrpkrbvFk30U6
+WJp79oBIWwnW/84RbxutRoEwSar/TLwVRkcZyRXeJTapbnLGnQ/lDO1o1d7+Vbjd
+q/Sg/cKHHf7NthCwkQNsCnHL0f51gZCJASIEEAECAAwFAkqoEAAFAwASdQAACgkQ
+lxC4m8pXrXwE/Af/XD4R/A5R6Ir/nCvKwCTKJmalajssuAcLEa2pMnFZYO/8rzLO
++Gp8p0qFH9C4LFwA0NvR5q6X/swuROf4zxljSvNcdlQVaAfJ2ZDEgJ5GXzsPplrv
+SAI9jS3LL7fSWDZgKuUe0a4qx7A0NgyGMUYGhP+QlRFa8vWEBI9fANd/0mMqAeBV
+qQyOH0X1FiW1Ca2Jn4NKfuMy9GEvRddVIbB1LvoNVtXPNzeeKMyNb9Jdx1MFWssy
+COBP2DayJKTmjvqPEc/YOjOowoN5sJ/jn4mVSTvvlTooLiReSs6GSCAjMVxN7eYS
+/Oyq6Iu1JDcJvmB8N2WixAZtAVgF8OA7CWXKVYkBIgQQAQIADAUCSrnHiQUDABJ1
+AAAKCRCXELibyletfPChB/9uECti1dZeNuFsd0/RuGyRUVlrrhJE6WCcOrLO9par
+rPbewbKBmjSzB0MygJXGvcC06mPNuquJ7/WpxKsFmfg4vJBPlADFKtgRUy9BLzjC
+eotWchPHFBVW9ftPbaQViSUu7d89NLjDDM5xrh80puDIApxoQLDoIrh3T1kpZx56
+jSWv0gelFUMbXAzmqkJSyL4Xdh1aqzgUbREd7Xf2ICzuh0sV6V7c/AwWtjWEGEsA
+HZaiQDywZwbC18GwrMLiAzGWb/AScFDQRCZKJDjL+Ql8YT6z+ZMVr8gb7CIU5PKY
+dhiIf2UVTQwLAoW7lNRCQQAqcGjK3IMIz7SO/yk4HmVUiQEiBBABAgAMBQJK3gjG
+BQMAEnUAAAoJEJcQuJvKV618jkEH+wb0Zv9z7xQgpLMowVuBFQVu8/z7P5ASumyB
+PUO3+0JVxSHBhlCKQK7n11m1fhuGt2fCxXhSU6LzXj36rsKRY53lGZ9QhvqFUtQH
+3Xb2IQLIJC4UKjG2jSSCdcuA/x98bwp2v7O03rn7ndCS16CwXnRV3geQoNipRKMS
+DajKPpZv1RiZm8pMKqEb8WSw352xWoOcxuffjlsOEwvJ85SEGCAZ9tmIlkZOc7Ai
+QONDvii9b8AYhQ60RIQC0HP2ASSmK0V92VeFPxHmAygdDQgZNVtbVxgnnt7oTNEu
+VRXNY+z4OfBArp7R+cTsvijDRZY4kML1n22hUybwoxUEvjqZV2+JASIEEAECAAwF
+AkrvOlQFAwASdQAACgkQlxC4m8pXrXxrPAgArXiNgZirNuBhfNCXlkzkCHLx5wnV
+e4SmTpbWzTwWw7+qk7d4l9hlWtdImISORINzo7f4ShSUzJX2GciNaXhaHRo7+y5O
+Zbu82jQb09aQQj/nibKYuqxqUrobTEm+DuYz3JUQZm2PsPcHLS8mX9cxvrJUncPG
+nXEV0DRaq71SGWDprtkvBbp6i38aY3sIhYgz8wM5m1szKDtjywmBYcFehIdozt9z
+hm7wZshzRWQX1+Rf/pIsnk+OzBIa34crSemTnacbV/B7278z2XAyziPNFuqz0xu+
+iltOmYmayfNWAmumuw9NcuwWMlth6Mc2HLrpo0ZBheJ6iuDMPsHnwqdB/4kBIgQQ
+AQIADAUCSwBd2gUDABJ1AAAKCRCXELibyletfP6tB/4m1w0BtlkJgtS6E+B/ns14
+z4A4PGors+n+MYm05qzvi+EnDF/sytCmVcKeimrtvDcfoDtKAFFvJjcYXfnJdGWm
+Pu0SJMRL5KKCirAKwZmU/saxOgoB5QLNw+DHPteJ3w9GmWlGxIqG1r15WC5duzBC
+y3FsnjJYG3jaLnHOO9yXXb5h0kUTORfUKdvAr1gxF2KoatZWqGoaPPnHoqb88rjt
+zk8I7gDqoXnzh8wLxa0ZYvfTC/McxdWTrwXLft+krmMQ18iIZEne2hvVLNJVuluU
+oiWLeHA8iNCQ4W4WTdLc1mCnCjGTMX/MN41uLH0C9Ka4R6wEaqj4lPDk1B/1TV+Q
+iQEiBBABAgAMBQJLEYGrBQMAEnUAAAoJEJcQuJvKV618naIH/2t9aH5mBTKBN6fU
+qhrf79vIsjtI/QNS5qisBISZMX3/1/0Gu6WnxkPSfdCUJMWCjMcnVj7KU2wxTHHG
+VpAStd9r2afUNxRyqZwzwyytktuZok0XngAEDYDDBS3ssu2R4uWLCsC2ysXEqO/5
+tI5YrTWJZrfeIphTaYP5hxrMujvqy3kEwKKbiMz91cDeiLS+YCBcalj5n/1dMYf7
+8U8C6ieurxAg/L8h6x25VM4Ilx4MmG2T8QGtkkUXd+Fd/KYWmf0LE5LLPknf0Hhw
+oVslPXeinp4FsHK/5wzviv4YZpzuTqs9NlKcMsa4IuuPOB0FDf0pn+OFQbEg9QwY
+2gCozK+JASIEEAECAAwFAksjTdQFAwASdQAACgkQlxC4m8pXrXwlogf/XBGbXRVX
+LMaRN4SczOjwT3/tUCriTkb3v+zKjRG90zFhYAccjn7w+7jKQicjq6quQG1EH2X4
+/Su6ps1lDLqGHHhiJW3ZhxQScLZmhdAYsh2qG4GP/UW3QjXG7c61t+H3olvWg2cr
+wqCxxFZAgkAAkr9xcHWFZJEQeXoob6cCZObaUnHSANdmC6s5lUxXYa2bmL7Q3UB4
+4KCzDvAfbPZKJOw9k0qb3lc11zx+vGdyZFbm4R0+3LPp/vT0b3GlSbbF9lU1GOXh
+VaphrgFFa76dmjfHCkPplXAkK1VSIU/aPGAefduTFMdlSZpdMtJ5AULjGcszBDlR
+pLlPxvqVa0ZpgIkBIgQQAQIADAUCSycmkgUDABJ1AAAKCRCXELibyletfHlNCACp
+1YespiHfQt2alcscE5zgfETEHHic8Ai6pNkU9HT4TeWcFHEDe5QqfYcpjLrQvBXS
+kSvxEittbyRdv+e+j5Z+HyHjiG8nAQBL6qy9eHqQE4+d7gYs6DTk7sG9ZMYphREb
+ltzD+F4hVCQdLT8LNr0eVFN7ehqECScDaCG8/Qyti+l/0M902/Yn+mz0ilOiUdWJ
+9x6LPaIINtb1gsYDEylLjwGIZmI0r5Kh9wYoV4vnNezFbxO1uRiW0B7iaPjIEsbt
+OOKp7wx2aX+DM3N9F3BtaIY8XnzcnomNm83SNsgmgrZljpQltUnNqIhNM8DupQ+I
+WOV5gtl6pTC7CgeVTVyRiQEiBBABAgAMBQJLOGXuBQMAEnUAAAoJEJcQuJvKV618
+ll4IAKJ9mm4jb0c8fe9+uDI8eCJRbzNbVXm8zWzpA8GUtQAakwxoKv332QP1Wa1P
+odni/e3EMhsSREOZJJv79YqGxGRBTE9Kb/VjM34nas4XSnXKW28XWhKyIw+XwQAi
+nY2swFHh+83Htr/mwTdJfS2aEYl2zboBvd/JZCdhOGU2GH737S/3uEczoKkfVQ/w
+OTM8X1xWwlYWqx23k/DsGcuDs9lA2g7Mx7DSqBtVjaTkn9h0zATzXLDkmP4SAUVj
+cZ83WDpFre5WnizZjdXlBMM5OCexp5WpmzyHLTnaBFK4jEmnsk5C2Rnoyp8Ivz6g
+Ecg1tRbEXijRw++d2TFYlJwLKtiJASIEEAECAAwFAktKMicFAwASdQAACgkQlxC4
+m8pXrXxqHQgAuYY5scKrh0m/GS9EYnyC9494lOlO6iytU0CpE6oBC31M3hfX/Dbj
+UbcS5szZNU+2CPYo4ujQLZ7suN7+tTjG6pZFfMevajT9+jsL+NPMF8RLdLOVYmbl
+TmSQGNO+XGEYaKYH5oZIeIW5AKCgi2ozkdFlBBLAx7Kqo/FyybhkURFEcvEyVmgf
+3KLV7IIiX/fYLfoCMCJ/Lcm9/llSFB1n8Nvg66Xd533DKoHjueD3jyaNAVlo2mq/
+sIAv++kntvOiB3GDK5pfwHZ78WWiCpsWZpE5gzAnzJ1Y0WEigRo0PVLu3cLO0jLG
+23d+H/CbfZ8rkajHJeCDQF7YVmP0t0nYpYkBIgQQAQIADAUCS1v+ZgUDABJ1AAAK
+CRCXELibyletfNS/CACqt2TkB86mjqM+cJ74+dWBvJ2aFuURuxzm95i9Q/W/hU08
+2iMbC3+0k2oD8CrTOe61P+3oRyLjv/UEDUNzLncNe2YsA9JeV+4hvPwH5Vp3Om13
+089fCKZUbqslXNKkHiWYU+zAaZJXEuGRmRz0HbQIeAMOWF4oa226uo1e4ws1Jhc+
+F3E/ApCRyFBqBUdL05hapQLditYpsBjIdiBGpjzidMLE2wX2W4ZpAdN0U6BIyIqR
+mTPjbSkvzS9kSWFmfhQgnBDKEYJpVZgE1sN52rYC1sDeGeiuKxlzjVov9MMhYMWa
+Zo3R5o3F2iIM/BK6FbC252lf/Mhu3ICuXujNBZNYiQEiBBABAgAMBQJLbSH4BQMA
+EnUAAAoJEJcQuJvKV618kd0IAJLLwDH6gvgAlBFklQJXqQxUdcSOOVMAWtlHgWOy
+ozjgomZZBkRL8dtCDr9YBMcj5czcQ3qpmLJdppXhKB+kJV2iUXfDMSFXwJ4wLfIs
+8FNnXw8H5U01oBkGH/Ku6ngL9Vwt+MjYHtCWkw9QueUKZnDudX9qIzLAIt+mwSTu
+A6+fY4VWIg40AA0v3exaQM55YR/UhlKunpGG9o8Qkq77dMEbTMpOmBoLbOMRB3Dd
+MAvVU6G2l6Pcb7KobVCuOBnb6batXARV/G8sw+nzfJ16fr/KobZT2A6m+Jrqk4dl
+F14ljLbz16O5JGUPAryN2G2ddBdSAy7dtFSVhWWiWC9n88q5Ag0EPj6jHRAIAO/h
+iX8WzHWOMLJT54x/axeDdqn1rBDf5cWmaCWHN2ujNNlgpx5emoU9v7QStsNUCOGB
+bXkeO4Ar7YG+jtSR33zqNh3y5kQ0YkY3dQ0wh6nsl+wh4XIIY/3TUZVtmdJeUBRH
+JlfVNFYad2hX1guFI37Ny1PoZAFsxO82g+XB/Se8r/+sbmVcONdcdIeFKrE3FjLt
+IjNQcxC6l9Q2Oy8KDxG/zvUZG3+H5i3tdRMyGgmuD6gEV0GXOHYUopzLeit1+Aa0
+bCk36Mwbu+BeOw/CJW3+b0mB27hOaf9aCA855IP6fJFvtxcblq8nHIqhU3Dc9tec
+sl9/S1xZ5S8ylG/xeRsAAwUH/i8KqmvAhq0X7DgCcYputwh37cuZlHOa1Ep07JRm
+BCDgkdQXkGrsj2Wzw7Aw/TGdWWkmn2pxb8BRui5cfcZFO7c6vryi6FpJuLucX975
++eVY50ndWkPXkJ1HF4i+HJwRqE2zliN/RHMs4LJcwXQvvjD43EE3AO6eiVFbD+qA
+AdxUFoOeLblKNBHPG7DPG9xL+Ni5rkE+TXShxsB7F0z7ZdJJZOG0JODmox7IstQT
+GoaU9u41oyZTIiXPiFidJoIZCh7fdurP8pn3X+R5HUNXMr7M+ba8lSNxce/F3kmH
+0L7rsKqdh9d/aVxhJINJ+inVDnrXWVoXu9GBjT8Nco1iU9SIVAQYEQIADAUCTnc9
+7QUJE/sBuAASB2VHUEcAAQEJEIxxjTtQcuH1FJsAmwWK9vmwRJ/y9gTnJ8PWf0BV
+roUTAKClYAhZuX2nUNwH4vlEJQHDqYa5yQ==
+=ghXk
+-----END PGP PUBLIC KEY BLOCK-----
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/nginx-badbots.filter b/packer/jambonz-mini/gcp/files/nginx-badbots.filter
new file mode 100644
index 0000000..12d4105
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx-badbots.filter
@@ -0,0 +1,24 @@
+# Fail2Ban configuration file
+#
+# Regexp to catch known spambots and software alike. Please verify
+# that it is your intent to block IPs which were driven by
+# above mentioned bots.
+
+
+[Definition]
+
+badbotscustom = EmailCollector|WebEMailExtrac|TrackBack/1\.02|sogou music spider|(?:Mozilla/\d+\.\d+ )?Jorgee
+badbots = Atomic_Email_Hunter/4\.0|atSpider/1\.0|autoemailspider|bwh3_user_agent|China Local Browse 2\.6|ContactBot/0\.2|ContentSmartz|DataCha0s/2\.0|DBrowse 1\.4b|DBrowse 1\.4d|Demo Bot DOT 16b|Demo Bot Z 16b|DSurf15a 01|DSurf15a 71|DSurf15a 81|DSurf15a VA|EBrowse 1\.4b|Educate Search VxB|EmailSiphon|EmailSpider|EmailWolf 1\.00|ESurf15a 15|ExtractorPro|Franklin Locator 1\.8|FSurf15a 01|Full Web Bot 0416B|Full Web Bot 0516B|Full Web Bot 2816B|Guestbook Auto Submitter|Industry Program 1\.0\.x|ISC Systems iRc Search 2\.1|IUPUI Research Bot v 1\.9a|LARBIN-EXPERIMENTAL \(efp@gmx\.net\)|LetsCrawl\.com/1\.0 \+http\://letscrawl\.com/|Lincoln State Web Browser|LMQueueBot/0\.2|LWP\:\:Simple/5\.803|Mac Finder 1\.0\.xx|MFC Foundation Class Library 4\.0|Microsoft URL Control - 6\.00\.8xxx|Missauga Locate 1\.0\.0|Missigua Locator 1\.9|Missouri College Browse|Mizzu Labs 2\.2|Mo College 1\.9|MVAClient|Mozilla/2\.0 \(compatible; NEWT ActiveX; Win32\)|Mozilla/3\.0 \(compatible; Indy Library\)|Mozilla/3\.0 \(compatible; scan4mail \(advanced version\) http\://www\.peterspages\.net/?scan4mail\)|Mozilla/4\.0 \(compatible; Advanced Email Extractor v2\.xx\)|Mozilla/4\.0 \(compatible; Iplexx Spider/1\.0 http\://www\.iplexx\.at\)|Mozilla/4\.0 \(compatible; MSIE 5\.0; Windows NT; DigExt; DTS Agent|Mozilla/4\.0 efp@gmx\.net|Mozilla/5\.0 \(Version\: xxxx Type\:xx\)|NameOfAgent \(CMS Spider\)|NASA Search 1\.0|Nsauditor/1\.x|PBrowse 1\.4b|PEval 1\.4b|Poirot|Port Huron Labs|Production Bot 0116B|Production Bot 2016B|Production Bot DOT 3016B|Program Shareware 1\.0\.2|PSurf15a 11|PSurf15a 51|PSurf15a VA|psycheclone|RSurf15a 41|RSurf15a 51|RSurf15a 81|searchbot admin@google\.com|ShablastBot 1\.0|snap\.com beta crawler v0|Snapbot/1\.0|Snapbot/1\.0 \(Snap Shots, \+http\://www\.snap\.com\)|sogou develop spider|Sogou Orion spider/3\.0\(\+http\://www\.sogou\.com/docs/help/webmasters\.htm#07\)|sogou spider|Sogou web spider/3\.0\(\+http\://www\.sogou\.com/docs/help/webmasters\.htm#07\)|sohu agent|SSurf15a 
11 |TSurf15a 11|Under the Rainbow 2\.2|User-Agent\: Mozilla/4\.0 \(compatible; MSIE 6\.0; Windows NT 5\.1\)|VadixBot|WebVulnCrawl\.unknown/1\.0 libwww-perl/5\.803|Wells Search II|WEP Search 00
+
+failregex = ^ -.*"(GET|POST|HEAD).*HTTP.*"(?:%(badbots)s|%(badbotscustom)s)"$
+
+ignoreregex =
+
+datepattern = ^[^\[]*\[({DATE})
+ {^LN-BEG}
+
+# DEV Notes:
+# List of bad bots fetched from http://www.user-agents.org
+# Generated on Thu Nov 7 14:23:35 PST 2013 by files/gen_badbots.
+#
+# Author: Yaroslav Halchenko
diff --git a/packer/jambonz-mini/gcp/files/nginx-badbots.jail b/packer/jambonz-mini/gcp/files/nginx-badbots.jail
new file mode 100644
index 0000000..318ebe1
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx-badbots.jail
@@ -0,0 +1,8 @@
+[nginx-badbots]
+
+enabled = true
+port = http,https
+filter = nginx-badbots
+logpath = /var/log/nginx/access.log
+maxretry = 1
+bantime = 86400
diff --git a/packer/jambonz-mini/gcp/files/nginx-nohome.jail b/packer/jambonz-mini/gcp/files/nginx-nohome.jail
new file mode 100644
index 0000000..ad80d5a
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx-nohome.jail
@@ -0,0 +1,8 @@
+[nginx-nohome]
+
+enabled = true
+port = http,https
+filter = nginx-nohome
+logpath = /var/log/nginx/access.log
+maxretry = 1
+bantime = 86400
diff --git a/packer/jambonz-mini/gcp/files/nginx-noproxy.filter b/packer/jambonz-mini/gcp/files/nginx-noproxy.filter
new file mode 100644
index 0000000..e6742db
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx-noproxy.filter
@@ -0,0 +1,5 @@
+[Definition]
+
+failregex = ^ -.*GET http.*
+
+ignoreregex =
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/nginx-noproxy.jail b/packer/jambonz-mini/gcp/files/nginx-noproxy.jail
new file mode 100644
index 0000000..0760f23
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx-noproxy.jail
@@ -0,0 +1,8 @@
+[nginx-noproxy]
+
+enabled = true
+port = http,https
+filter = nginx-noproxy
+logpath = /var/log/nginx/access.log
+maxretry = 1
+bantime = 86400
diff --git a/packer/jambonz-mini/gcp/files/nginx-noscript.filter b/packer/jambonz-mini/gcp/files/nginx-noscript.filter
new file mode 100644
index 0000000..dddf94d
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx-noscript.filter
@@ -0,0 +1,5 @@
+[Definition]
+
+failregex = ^ -.*GET.*(\.php|\.asp|\.exe|\.pl|\.cgi|\.scgi)
+
+ignoreregex =
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/nginx-noscript.jail b/packer/jambonz-mini/gcp/files/nginx-noscript.jail
new file mode 100644
index 0000000..a21180d
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx-noscript.jail
@@ -0,0 +1,8 @@
+[nginx-noscript]
+
+enabled = true
+port = http,https
+filter = nginx-noscript
+logpath = /var/log/nginx/access.log
+maxretry = 1
+bantime = 86400
diff --git a/packer/jambonz-mini/gcp/files/nginx.api b/packer/jambonz-mini/gcp/files/nginx.api
new file mode 100644
index 0000000..3b79aee
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx.api
@@ -0,0 +1,12 @@
+server {
+ listen 80;
+ server_name api.your_domain.com; # enter the app sub-domain that you setup in 11
+ location / {
+ proxy_pass http://localhost:3000; # point the reverse proxy to the api server on port 3000
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/nginx.default b/packer/jambonz-mini/gcp/files/nginx.default
new file mode 100644
index 0000000..4de5bd0
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx.default
@@ -0,0 +1,54 @@
+server {
+ listen 80;
+ server_name _;
+
+ location /api/ {
+ rewrite ^/api/(.*)$ /$1 break;
+ proxy_pass http://localhost:3002;
+ proxy_set_header Host $host;
+ }
+
+ location / {
+ proxy_pass http://localhost:3001;
+ proxy_set_header Host $host;
+ }
+}
+
+server {
+ listen 80;
+ server_name grafana.your_domain.com;
+ location / {
+ proxy_pass http://localhost:3010;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
+
+server {
+ listen 80;
+ server_name homer.your_domain.com;
+ location / {
+ proxy_pass http://localhost:9080;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
+
+server {
+ listen 80;
+ server_name jaeger.your_domain.com;
+ location / {
+ proxy_pass http://localhost:16686;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/nginx.grafana b/packer/jambonz-mini/gcp/files/nginx.grafana
new file mode 100644
index 0000000..010d3fd
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx.grafana
@@ -0,0 +1,12 @@
+server {
+ listen 80;
+ server_name grafana.your_domain.com;
+ location / {
+ proxy_pass http://localhost:3000;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/nginx.homer b/packer/jambonz-mini/gcp/files/nginx.homer
new file mode 100644
index 0000000..df03320
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx.homer
@@ -0,0 +1,12 @@
+server {
+ listen 80;
+ server_name homer.your_domain.com;
+ location / {
+ proxy_pass http://localhost:9080;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/nginx.public-apps b/packer/jambonz-mini/gcp/files/nginx.public-apps
new file mode 100644
index 0000000..2e99ccf
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/nginx.public-apps
@@ -0,0 +1,12 @@
+server {
+ listen 80;
+ server_name public-apps.your_domain.com;
+ location / {
+ proxy_pass http://localhost:3010;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/rtpengine-recording.ini b/packer/jambonz-mini/gcp/files/rtpengine-recording.ini
new file mode 100644
index 0000000..2b8d5c8
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/rtpengine-recording.ini
@@ -0,0 +1,50 @@
+[rtpengine-recording]
+
+table = 42
+
+log-level = 5
+
+### number of worker threads (default 8)
+# num-threads = 16
+
+### where to forward to (unix socket)
+# forward-to = /run/rtpengine/sock
+
+### where to store recordings: file (default), db, both
+output-storage = file
+
+### format of stored recordings: wav (default), mp3
+# output-format = mp3
+# output-format = pcma
+output-format = wav
+
+### directory containing rtpengine metadata files
+spool-dir = /var/spool/recording
+
+### where to store media files to
+output-dir = /tmp/recordings
+
+### File name pattern to be used for recording files
+output-pattern = %Y%m%d%H00/rtpengine-%c-%t-M%S%u
+
+### resample all output audio
+resample-to = 8000
+
+### bits per second for MP3 encoding
+# mp3_bitrate = 24000
+
+### mix participating sources into a single output
+output-mixed = true
+
+### create one output file for each source
+# output-single = false
+
+### mix method: direct (mix input) channels (multi-channel)
+mix-method = direct
+
+### mysql configuration for db storage
+# mysql-host = localhost
+# mysql-port = 3306
+# mysql-user = rtpengine
+# mysql-pass = secret
+# mysql-db = rtpengine
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/files/rtpengine-recording.service b/packer/jambonz-mini/gcp/files/rtpengine-recording.service
new file mode 100644
index 0000000..690ab9d
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/rtpengine-recording.service
@@ -0,0 +1,29 @@
+
+[Unit]
+Description=rtpengine-recording
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+Environment="LD_LIBRARY_PATH=/usr/local/lib/"
+ExecStart=/usr/local/bin/rtpengine-recording --config-file=/etc/rtpengine-recording.ini
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/gcp/files/rtpengine.gcp.service b/packer/jambonz-mini/gcp/files/rtpengine.gcp.service
new file mode 100644
index 0000000..2f08f1a
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/rtpengine.gcp.service
@@ -0,0 +1,48 @@
+
+[Unit]
+Description=rtpengine
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+Environment="LD_LIBRARY_PATH=/usr/local/lib/"
+ExecStartPre=/bin/sh -c 'systemctl set-environment LOCAL_IP=`curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip`'
+ExecStartPre=/bin/sh -c 'systemctl set-environment PUBLIC_IP=`curl -s -H "Metadata-Flavor: Google" http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip`'
+ExecStartPre=echo 'del 42' > /proc/rtpengine/control
+ExecStart=/usr/local/bin/rtpengine \
+--interface private/${LOCAL_IP} \
+--interface public/${LOCAL_IP}!${PUBLIC_IP} \
+--listen-ng=22222 \
+--listen-http=8080 \
+--listen-udp=12222 \
+--dtmf-log-dest=127.0.0.1:22223 \
+--listen-cli=127.0.0.1:9900 \
+--table=42 \
+--pidfile /run/rtpengine.pid \
+--port-min 40000 \
+--port-max 60000 \
+--recording-dir /var/spool/recording \
+--recording-method proc \
+--log-level 5 \
+--delete-delay 0
+PIDFile=/run/rtpengine.pid
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/gcp/files/rtpengine.service b/packer/jambonz-mini/gcp/files/rtpengine.service
new file mode 100644
index 0000000..cf66f52
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/rtpengine.service
@@ -0,0 +1,48 @@
+
+[Unit]
+Description=rtpengine
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+Environment="LD_LIBRARY_PATH=/usr/local/lib/"
+ExecStartPre=/bin/sh -c 'systemctl set-environment LOCAL_IP=`curl -s http://169.254.169.254/latest/meta-data/local-ipv4`'
+ExecStartPre=/bin/sh -c 'systemctl set-environment PUBLIC_IP=`curl -s http://169.254.169.254/latest/meta-data/public-ipv4`'
+ExecStartPre=echo 'del 42' > /proc/rtpengine/control
+ExecStart=/usr/local/bin/rtpengine \
+--interface private/${LOCAL_IP} \
+--interface public/${LOCAL_IP}!${PUBLIC_IP} \
+--listen-ng=22222 \
+--listen-http=8080 \
+--listen-udp=12222 \
+--dtmf-log-dest=127.0.0.1:22223 \
+--listen-cli=127.0.0.1:9900 \
+--table=42 \
+--pidfile /run/rtpengine.pid \
+--port-min 40000 \
+--port-max 60000 \
+--recording-dir /var/spool/recording \
+--recording-method proc \
+--log-level 5 \
+--delete-delay 0
+PIDFile=/run/rtpengine.pid
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/gcp/files/switch.conf.xml b/packer/jambonz-mini/gcp/files/switch.conf.xml
new file mode 100644
index 0000000..0e67ec1
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/switch.conf.xml
@@ -0,0 +1,184 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/gcp/files/switch_core_media.c.patch b/packer/jambonz-mini/gcp/files/switch_core_media.c.patch
new file mode 100644
index 0000000..3d86c06
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/switch_core_media.c.patch
@@ -0,0 +1,11 @@
+--- switch_core_media.c 2022-03-04 19:02:35.000000000 -0500
++++ switch_core_media.c.new 2022-03-05 08:54:38.000000000 -0500
+@@ -2749,7 +2749,7 @@
+ *(buf + datalen) = '\0';
+
+ while (*buf & 0x80) {
+- if (buf + 3 > e) {
++ if (buf + 3 > e || count >= MAX_RED_FRAMES) {
+ *new_datalen = 0;
+ return 0;
+ }
diff --git a/packer/jambonz-mini/gcp/files/switch_rtp.c.patch b/packer/jambonz-mini/gcp/files/switch_rtp.c.patch
new file mode 100644
index 0000000..ee9debf
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/switch_rtp.c.patch
@@ -0,0 +1,40 @@
+--- switch_rtp.c 2021-01-12 02:11:42.334172596 +0000
++++ switch_rtp.c.new 2021-01-12 02:12:42.695207260 +0000
+@@ -5639,8 +5639,8 @@
+ static switch_size_t do_flush(switch_rtp_t *rtp_session, int force, switch_size_t bytes_in)
+ {
+ int was_blocking = 0;
+- switch_size_t bytes;
+- uint32_t flushed = 0;
++ //switch_size_t bytes;
++ //uint32_t flushed = 0;
+ switch_size_t bytes_out = 0;
+
+ if (!switch_rtp_ready(rtp_session)) {
+@@ -5700,7 +5700,7 @@
+ #endif
+ handle_rfc2833(rtp_session, bytes_in, &do_cng);
+ }
+-
++/*
+ do {
+ if (switch_rtp_ready(rtp_session)) {
+ bytes = sizeof(rtp_msg_t);
+@@ -5713,7 +5713,7 @@
+ rtp_session->last_media = switch_micro_time_now();
+ }
+
+- /* Make sure to handle RFC2833 packets, even if we're flushing the packets */
++ //Make sure to handle RFC2833 packets, even if we're flushing the packets
+ if (bytes > rtp_header_len && rtp_session->recv_msg.header.version == 2 && rtp_session->recv_msg.header.pt == rtp_session->recv_te) {
+ rtp_session->last_rtp_hdr = rtp_session->recv_msg.header;
+ handle_rfc2833(rtp_session, bytes, &do_cng);
+@@ -5732,7 +5732,7 @@
+ break;
+ }
+ } while (bytes > 0);
+-
++*/
+ #ifdef DEBUG_2833
+ if (flushed) {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "*** do_flush: total flushed packets: %ld ***\n",(long)flushed);
diff --git a/packer/jambonz-mini/gcp/files/telegraf.conf b/packer/jambonz-mini/gcp/files/telegraf.conf
new file mode 100644
index 0000000..3aa5be4
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/telegraf.conf
@@ -0,0 +1,7531 @@
+# Telegraf Configuration
+#
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+#
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+#
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+#
+# Environment variables can be used anywhere in this config file, simply surround
+# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
+# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
+
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+ role = "mini"
+ # dc = "us-east-1" # will tag all metrics with dc=us-east-1
+ # rack = "1a"
+ ## Environment variables can be used as tags, and throughout the config file
+ # user = "$USER"
+
+
+# Configuration for telegraf agent
+[agent]
+ ## Default data collection interval for all inputs
+ interval = "10s"
+ ## Rounds collection interval to 'interval'
+ ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+ round_interval = true
+
+ ## Telegraf will send metrics to outputs in batches of at most
+ ## metric_batch_size metrics.
+ ## This controls the size of writes that Telegraf sends to output plugins.
+ metric_batch_size = 1000
+
+ ## Maximum number of unwritten metrics per output. Increasing this value
+ ## allows for longer periods of output downtime without dropping metrics at the
+ ## cost of higher maximum memory usage.
+ metric_buffer_limit = 10000
+
+ ## Collection jitter is used to jitter the collection by a random amount.
+ ## Each plugin will sleep for a random time within jitter before collecting.
+ ## This can be used to avoid many plugins querying things like sysfs at the
+ ## same time, which can have a measurable effect on the system.
+ collection_jitter = "0s"
+
+ ## Default flushing interval for all outputs. Maximum flush_interval will be
+ ## flush_interval + flush_jitter
+ flush_interval = "10s"
+ ## Jitter the flush interval by a random amount. This is primarily to avoid
+ ## large write spikes for users running a large number of telegraf instances.
+ ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+ flush_jitter = "0s"
+
+ ## By default or when set to "0s", precision will be set to the same
+ ## timestamp order as the collection interval, with the maximum being 1s.
+ ## ie, when interval = "10s", precision will be "1s"
+ ## when interval = "250ms", precision will be "1ms"
+ ## Precision will NOT be used for service inputs. It is up to each individual
+ ## service input to set the timestamp at the appropriate precision.
+ ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
+ precision = ""
+
+ ## Log at debug level.
+ # debug = false
+ ## Log only error level messages.
+ # quiet = false
+
+ ## Log target controls the destination for logs and can be one of "file",
+ ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
+ ## is determined by the "logfile" setting.
+ # logtarget = "file"
+
+ ## Name of the file to be logged to when using the "file" logtarget. If set to
+ ## the empty string then logs are written to stderr.
+ # logfile = ""
+
+ ## The logfile will be rotated after the time interval specified. When set
+ ## to 0 no time based rotation is performed. Logs are rotated only when
+ ## written to, if there is no log activity rotation may be delayed.
+ # logfile_rotation_interval = "0d"
+
+ ## The logfile will be rotated when it becomes larger than the specified
+ ## size. When set to 0 no size based rotation is performed.
+ # logfile_rotation_max_size = "0MB"
+
+ ## Maximum number of rotated archives to keep, any older logs are deleted.
+ ## If set to -1, no archives are removed.
+ # logfile_rotation_max_archives = 5
+
+ ## Override default hostname, if empty use os.Hostname()
+ hostname = ""
+  ## If set to true, do not set the "host" tag in the telegraf agent.
+ omit_hostname = false
+
+
+###############################################################################
+# OUTPUT PLUGINS #
+###############################################################################
+
+
+# Configuration for sending metrics to InfluxDB
+[[outputs.influxdb]]
+ urls = ["http://127.0.0.1:8086/"] # required
+ database = "telegraf" # required
+ retention_policy = "autogen"
+ write_consistency = "any"
+ timeout = "5s"
+ namedrop = ["hep*"]
+
+[[outputs.influxdb]]
+ urls = ["http://127.0.0.1:8086/"] # required
+ database = "homer" # required
+ retention_policy = ""
+ write_consistency = "any"
+ timeout = "5s"
+ namepass = ["hep*"]
+
+ ## The full HTTP or UDP URL for your InfluxDB instance.
+ ##
+ ## Multiple URLs can be specified for a single cluster, only ONE of the
+ ## urls will be written to each interval.
+ # urls = ["unix:///var/run/influxdb.sock"]
+ # urls = ["udp://127.0.0.1:8089"]
+ # urls = ["http://127.0.0.1:8086"]
+
+ ## The target database for metrics; will be created as needed.
+ ## For UDP url endpoint database needs to be configured on server side.
+ # database = "telegraf"
+
+ ## The value of this tag will be used to determine the database. If this
+ ## tag is not set the 'database' option is used as the default.
+ # database_tag = ""
+
+ ## If true, the 'database_tag' will not be included in the written metric.
+ # exclude_database_tag = false
+
+ ## If true, no CREATE DATABASE queries will be sent. Set to true when using
+ ## Telegraf with a user without permissions to create databases or when the
+ ## database already exists.
+ # skip_database_creation = false
+
+ ## Name of existing retention policy to write to. Empty string writes to
+ ## the default retention policy. Only takes effect when using HTTP.
+ # retention_policy = ""
+
+ ## The value of this tag will be used to determine the retention policy. If this
+ ## tag is not set the 'retention_policy' option is used as the default.
+ # retention_policy_tag = ""
+
+ ## If true, the 'retention_policy_tag' will not be included in the written metric.
+ # exclude_retention_policy_tag = false
+
+ ## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
+ ## Only takes effect when using HTTP.
+ # write_consistency = "any"
+
+ ## Timeout for HTTP messages.
+ # timeout = "5s"
+
+ ## HTTP Basic Auth
+ # username = "telegraf"
+ # password = "metricsmetricsmetricsmetrics"
+
+ ## HTTP User-Agent
+ # user_agent = "telegraf"
+
+ ## UDP payload size is the maximum packet size to send.
+ # udp_payload = "512B"
+
+ ## Optional TLS Config for use on HTTP connections.
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+
+ ## HTTP Proxy override, if unset values the standard proxy environment
+ ## variables are consulted to determine which proxy, if any, should be used.
+ # http_proxy = "http://corporate.proxy:3128"
+
+ ## Additional HTTP headers
+ # http_headers = {"X-Special-Header" = "Special-Value"}
+
+ ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+ ## compress body or "identity" to apply no encoding.
+ # content_encoding = "gzip"
+
+ ## When true, Telegraf will output unsigned integers as unsigned values,
+ ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
+ ## integer values. Enabling this option will result in field type errors if
+ ## existing data has been written.
+ # influx_uint_support = false
+
+
+# # Configuration for Amon Server to send metrics to.
+# [[outputs.amon]]
+# ## Amon Server Key
+# server_key = "my-server-key" # required.
+#
+# ## Amon Instance URL
+# amon_instance = "https://youramoninstance" # required
+#
+# ## Connection timeout.
+# # timeout = "5s"
+
+
+# # Publishes metrics to an AMQP broker
+# [[outputs.amqp]]
+# ## Broker to publish to.
+# ## deprecated in 1.7; use the brokers option
+# # url = "amqp://localhost:5672/influxdb"
+#
+# ## Brokers to publish to. If multiple brokers are specified a random broker
+# ## will be selected anytime a connection is established. This can be
+# ## helpful for load balancing when not using a dedicated load balancer.
+# brokers = ["amqp://localhost:5672/influxdb"]
+#
+# ## Maximum messages to send over a connection. Once this is reached, the
+# ## connection is closed and a new connection is made. This can be helpful for
+# ## load balancing when not using a dedicated load balancer.
+# # max_messages = 0
+#
+# ## Exchange to declare and publish to.
+# exchange = "telegraf"
+#
+# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
+# # exchange_type = "topic"
+#
+# ## If true, exchange will be passively declared.
+# # exchange_passive = false
+#
+# ## Exchange durability can be either "transient" or "durable".
+# # exchange_durability = "durable"
+#
+# ## Additional exchange arguments.
+# # exchange_arguments = { }
+# # exchange_arguments = {"hash_property" = "timestamp"}
+#
+# ## Authentication credentials for the PLAIN auth_method.
+# # username = ""
+# # password = ""
+#
+# ## Auth method. PLAIN and EXTERNAL are supported
+# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+# ## described here: https://www.rabbitmq.com/plugins.html
+# # auth_method = "PLAIN"
+#
+# ## Metric tag to use as a routing key.
+# ## ie, if this tag exists, its value will be used as the routing key
+# # routing_tag = "host"
+#
+# ## Static routing key. Used when no routing_tag is set or as a fallback
+# ## when the tag specified in routing tag is not found.
+# # routing_key = ""
+# # routing_key = "telegraf"
+#
+# ## Delivery Mode controls if a published message is persistent.
+# ## One of "transient" or "persistent".
+# # delivery_mode = "transient"
+#
+# ## InfluxDB database added as a message header.
+# ## deprecated in 1.7; use the headers option
+# # database = "telegraf"
+#
+# ## InfluxDB retention policy added as a message header
+# ## deprecated in 1.7; use the headers option
+# # retention_policy = "default"
+#
+# ## Static headers added to each published message.
+# # headers = { }
+# # headers = {"database" = "telegraf", "retention_policy" = "default"}
+#
+# ## Connection timeout. If not provided, will default to 5s. 0s means no
+# ## timeout (not recommended).
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## If true use batch serialization format instead of line based delimiting.
+# ## Only applies to data formats which are not line based such as JSON.
+# ## Recommended to set to true.
+# # use_batch_format = false
+#
+# ## Content encoding for message payloads, can be set to "gzip" or
+# ## "identity" to apply no encoding.
+# ##
+# ## Please note that when use_batch_format = false each amqp message contains only
+# ## a single metric, it is recommended to use compression with batch format
+# ## for best results.
+# # content_encoding = "identity"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Send metrics to Azure Application Insights
+# [[outputs.application_insights]]
+# ## Instrumentation key of the Application Insights resource.
+# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
+#
+# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
+# # endpoint_url = "https://dc.services.visualstudio.com/v2/track"
+#
+# ## Timeout for closing (default: 5s).
+# # timeout = "5s"
+#
+# ## Enable additional diagnostic logging.
+# # enable_diagnostic_logging = false
+#
+# ## Context Tag Sources add Application Insights context tags to a tag value.
+# ##
+# ## For list of allowed context tag keys see:
+# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
+# # [outputs.application_insights.context_tag_sources]
+# # "ai.cloud.role" = "kubernetes_container_name"
+# # "ai.cloud.roleInstance" = "kubernetes_pod_name"
+
+
+# # Send aggregate metrics to Azure Monitor
+# [[outputs.azure_monitor]]
+# ## Timeout for HTTP writes.
+# # timeout = "20s"
+#
+# ## Set the namespace prefix, defaults to "Telegraf/".
+# # namespace_prefix = "Telegraf/"
+#
+# ## Azure Monitor doesn't have a string value type, so convert string
+# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
+# ## a maximum of 10 dimensions so Telegraf will only send the first 10
+# ## alphanumeric dimensions.
+# # strings_as_dimensions = false
+#
+# ## Both region and resource_id must be set or be available via the
+# ## Instance Metadata service on Azure Virtual Machines.
+# #
+# ## Azure Region to publish metrics against.
+# ## ex: region = "southcentralus"
+# # region = ""
+# #
+# ## The Azure Resource ID against which metric will be logged, e.g.
+# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/"
+# # resource_id = ""
+#
+# ## Optionally, if in Azure US Government, China or other sovereign
+# ## cloud environment, set appropriate REST endpoint for receiving
+# ## metrics. (Note: region may be unused in this context)
+# # endpoint_url = "https://monitoring.core.usgovcloudapi.net"
+
+
+# # Publish Telegraf metrics to a Google Cloud PubSub topic
+# [[outputs.cloud_pubsub]]
+# ## Required. Name of Google Cloud Platform (GCP) Project that owns
+# ## the given PubSub topic.
+# project = "my-project"
+#
+# ## Required. Name of PubSub topic to publish metrics to.
+# topic = "my-topic"
+#
+# ## Required. Data format to consume.
+# ## Each data format has its own unique set of configuration options.
+# ## Read more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+# ## Application Default Credentials, which is preferred.
+# # credentials_file = "path/to/my/creds.json"
+#
+# ## Optional. If true, will send all metrics per write in one PubSub message.
+# # send_batched = true
+#
+# ## The following publish_* parameters specifically configures batching
+# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
+# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
+#
+# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
+# ## when it has this many PubSub messages. If send_batched is true,
+# ## this is ignored and treated as if it were 1.
+# # publish_count_threshold = 1000
+#
+# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
+# ## when it has this many PubSub messages. If send_batched is true,
+# ## this is ignored and treated as if it were 1
+# # publish_byte_threshold = 1000000
+#
+# ## Optional. Specifically configures requests made to the PubSub API.
+# # publish_num_go_routines = 2
+#
+# ## Optional. Specifies a timeout for requests to the PubSub API.
+# # publish_timeout = "30s"
+#
+# ## Optional. If true, published PubSub message data will be base64-encoded.
+# # base64_data = false
+#
+# ## Optional. PubSub attributes to add to metrics.
+# # [outputs.cloud_pubsub.attributes]
+# # my_attr = "tag_value"
+
+
+# # Configuration for AWS CloudWatch output.
+# [[outputs.cloudwatch]]
+# ## Amazon REGION
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Namespace for the CloudWatch MetricDatums
+# namespace = "InfluxData/Telegraf"
+#
+# ## If you have a large amount of metrics, you should consider to send statistic
+# ## values instead of raw metrics which could not only improve performance but
+# ## also save AWS API cost. If enable this flag, this plugin would parse the required
+# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch.
+# ## You could use basicstats aggregator to calculate those fields. If not all statistic
+# ## fields are available, all fields would still be sent as raw metrics.
+# # write_statistics = false
+#
+# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision)
+# # high_resolution_metrics = false
+
+
+# # Configuration for CrateDB to send metrics to.
+# [[outputs.cratedb]]
+# # A github.com/jackc/pgx connection string.
+# # See https://godoc.org/github.com/jackc/pgx#ParseDSN
+# url = "postgres://user:password@localhost/schema?sslmode=disable"
+# # Timeout for all CrateDB queries.
+# timeout = "5s"
+# # Name of the table to store metrics in.
+# table = "metrics"
+# # If true, and the metrics table does not exist, create it automatically.
+# table_create = true
+
+
+# # Configuration for DataDog API to send metrics to.
+# [[outputs.datadog]]
+# ## Datadog API key
+# apikey = "my-secret-key"
+#
+# ## Connection timeout.
+# # timeout = "5s"
+#
+# ## Write URL override; useful for debugging.
+# # url = "https://app.datadoghq.com/api/v1/series"
+
+
+# # Send metrics to nowhere at all
+# [[outputs.discard]]
+# # no configuration
+
+
+# # Send telegraf metrics to a Dynatrace environment
+# [[outputs.dynatrace]]
+# ## For usage with the Dynatrace OneAgent you can omit any configuration,
+# ## the only requirement is that the OneAgent is running on the same host.
+# ## Only setup environment url and token if you want to monitor a Host without the OneAgent present.
+# ##
+# ## Your Dynatrace environment URL.
+# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default)
+# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest"
+# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest"
+# url = ""
+#
+# ## Your Dynatrace API token.
+# ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API
+# ## The API token needs data ingest scope permission. When using OneAgent, no API token is required.
+# api_token = ""
+#
+# ## Optional prefix for metric names (e.g.: "telegraf.")
+# prefix = "telegraf."
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional flag for ignoring tls certificate check
+# # insecure_skip_verify = false
+#
+#
+# ## Connection timeout, defaults to "5s" if not set.
+# timeout = "5s"
+
+
+# # Configuration for Elasticsearch to send metrics to.
+# [[outputs.elasticsearch]]
+# ## The full HTTP endpoint URL for your Elasticsearch instance
+# ## Multiple urls can be specified as part of the same cluster,
+# ## this means that only ONE of the urls will be written to each interval.
+# urls = [ "http://node1.es.example.com:9200" ] # required.
+# ## Elasticsearch client timeout, defaults to "5s" if not set.
+# timeout = "5s"
+# ## Set to true to ask Elasticsearch a list of all cluster nodes,
+# ## thus it is not necessary to list all nodes in the urls config option.
+# enable_sniffer = false
+# ## Set the interval to check if the Elasticsearch nodes are available
+# ## Setting to "0s" will disable the health check (not recommended in production)
+# health_check_interval = "10s"
+# ## HTTP basic authentication details
+# # username = "telegraf"
+# # password = "mypassword"
+#
+# ## Index Config
+# ## The target index for metrics (Elasticsearch will create it if it does not exist).
+# ## You can use the date specifiers below to create indexes per time frame.
+# ## The metric timestamp will be used to decide the destination index name
+# # %Y - year (2016)
+# # %y - last two digits of year (00..99)
+# # %m - month (01..12)
+# # %d - day of month (e.g., 01)
+# # %H - hour (00..23)
+# # %V - week of the year (ISO week) (01..53)
+# ## Additionally, you can specify a tag name using the notation {{tag_name}}
+# ## which will be used as part of the index name. If the tag does not exist,
+# ## the default tag value will be used.
+# # index_name = "telegraf-{{host}}-%Y.%m.%d"
+# # default_tag_value = "none"
+# index_name = "telegraf-%Y.%m.%d" # required.
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Template Config
+# ## Set to true if you want telegraf to manage its index template.
+# ## If enabled it will create a recommended index template for telegraf indexes
+# manage_template = true
+# ## The template name used for telegraf indexes
+# template_name = "telegraf"
+# ## Set to true if you want telegraf to overwrite an existing template
+# overwrite_template = false
+# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string
+# ## it will enable data resend and update metric points avoiding duplicated metrics with different ids
+# force_document_id = false
+
+
+# # Send metrics to command as input over stdin
+# [[outputs.exec]]
+# ## Command to ingest metrics via stdin.
+# command = ["tee", "-a", "/dev/null"]
+#
+# ## Timeout for command to complete.
+# # timeout = "5s"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Run executable as long-running output plugin
+# [[outputs.execd]]
+# ## Program to run as daemon
+# command = ["my-telegraf-output", "--some-flag", "value"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+#
+# ## Data format to export.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send telegraf metrics to file(s)
+# [[outputs.file]]
+# ## Files to write to, "stdout" is a specially handled file.
+# files = ["stdout", "/tmp/metrics.out"]
+#
+# ## Use batch serialization format instead of line based delimiting. The
+# ## batch format allows for the production of non line based output formats and
+# ## may more efficiently encode metric groups.
+# # use_batch_format = false
+#
+# ## The file will be rotated after the time interval specified. When set
+# ## to 0 no time based rotation is performed.
+# # rotation_interval = "0d"
+#
+# ## The logfile will be rotated when it becomes larger than the specified
+# ## size. When set to 0 no size based rotation is performed.
+# # rotation_max_size = "0MB"
+#
+# ## Maximum number of rotated archives to keep, any older logs are deleted.
+# ## If set to -1, no archives are removed.
+# # rotation_max_archives = 5
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Configuration for Graphite server to send metrics to
+# [[outputs.graphite]]
+# ## TCP endpoint for your graphite instance.
+# ## If multiple endpoints are configured, output will be load balanced.
+# ## Only one of the endpoints will be written to with each iteration.
+# servers = ["localhost:2003"]
+# ## Prefix metrics name
+# prefix = ""
+# ## Graphite output template
+# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# template = "host.tags.measurement.field"
+#
+# ## Enable Graphite tags support
+# # graphite_tag_support = false
+#
+# ## Character for separating metric name and field for Graphite tags
+# # graphite_separator = "."
+#
+# ## Graphite templates patterns
+# ## 1. Template for cpu
+# ## 2. Template for disk*
+# ## 3. Default template
+# # templates = [
+# # "cpu tags.measurement.host.field",
+# # "disk* measurement.field",
+# # "host.measurement.tags.field"
+# #]
+#
+# ## timeout in seconds for the write connection to graphite
+# timeout = 2
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Send telegraf metrics to graylog
+# [[outputs.graylog]]
+# ## UDP endpoint for your graylog instance.
+# servers = ["127.0.0.1:12201"]
+#
+# ## The field to use as the GELF short_message, if unset the static string
+# ## "telegraf" will be used.
+# ## example: short_message_field = "message"
+# # short_message_field = ""
+
+
+# # Configurable HTTP health check resource based on metrics
+# [[outputs.health]]
+# ## Address and port to listen on.
+# ## ex: service_address = "http://localhost:8080"
+# ## service_address = "unix:///var/run/telegraf-health.sock"
+# # service_address = "http://:8080"
+#
+# ## The maximum duration for reading the entire request.
+# # read_timeout = "5s"
+# ## The maximum duration for writing the entire response.
+# # write_timeout = "5s"
+#
+# ## Username and password to accept for HTTP basic authentication.
+# # basic_username = "user1"
+# # basic_password = "secret"
+#
+# ## Allowed CA certificates for client certificates.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## TLS server certificate and private key.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## One or more check sub-tables should be defined, it is also recommended to
+# ## use metric filtering to limit the metrics that flow into this output.
+# ##
+# ## When using the default buffer sizes, this example will fail when the
+# ## metric buffer is half full.
+# ##
+# ## namepass = ["internal_write"]
+# ## tagpass = { output = ["influxdb"] }
+# ##
+# ## [[outputs.health.compares]]
+# ## field = "buffer_size"
+# ## lt = 5000.0
+# ##
+# ## [[outputs.health.contains]]
+# ## field = "buffer_size"
+
+
+# # A plugin that can transmit metrics over HTTP
+# [[outputs.http]]
+# ## URL is the address to send metrics to
+# url = "http://127.0.0.1:8080/telegraf"
+#
+# ## Timeout for HTTP message
+# # timeout = "5s"
+#
+# ## HTTP method, one of: "POST" or "PUT"
+# # method = "POST"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## OAuth2 Client Credentials Grant
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://indentityprovider/oauth2/v1/token"
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Additional HTTP headers
+# # [outputs.http.headers]
+# # # Should be set manually to "application/json" for json data_format
+# # Content-Type = "text/plain; charset=utf-8"
+
+
+# # Configuration for sending metrics to InfluxDB
+# [[outputs.influxdb_v2]]
+# ## The URLs of the InfluxDB cluster nodes.
+# ##
+# ## Multiple URLs can be specified for a single cluster, only ONE of the
+# ## urls will be written to each interval.
+# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
+# urls = ["http://127.0.0.1:8086"]
+#
+# ## Token for authentication.
+# token = ""
+#
+# ## Organization is the name of the organization you wish to write to; must exist.
+# organization = ""
+#
+# ## Destination bucket to write into.
+# bucket = ""
+#
+# ## The value of this tag will be used to determine the bucket. If this
+# ## tag is not set the 'bucket' option is used as the default.
+# # bucket_tag = ""
+#
+# ## If true, the bucket tag will not be added to the metric.
+# # exclude_bucket_tag = false
+#
+# ## Timeout for HTTP messages.
+# # timeout = "5s"
+#
+# ## Additional HTTP headers
+# # http_headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## HTTP Proxy override, if unset values the standard proxy environment
+# ## variables are consulted to determine which proxy, if any, should be used.
+# # http_proxy = "http://corporate.proxy:3128"
+#
+# ## HTTP User-Agent
+# # user_agent = "telegraf"
+#
+# ## Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "gzip"
+#
+# ## Enable or disable uint support for writing uints influxdb 2.0.
+# # influx_uint_support = false
+#
+# ## Optional TLS Config for use on HTTP connections.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Configuration for sending metrics to an Instrumental project
+# [[outputs.instrumental]]
+# ## Project API Token (required)
+# api_token = "API Token" # required
+# ## Prefix the metrics with a given name
+# prefix = ""
+# ## Stats output template (Graphite formatting)
+# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
+# template = "host.tags.measurement.field"
+# ## Timeout in seconds to connect
+# timeout = "2s"
+# ## Display Communication to Instrumental
+# debug = false
+
+
+# # Configuration for the Kafka server to send metrics to
+# [[outputs.kafka]]
+# ## URLs of kafka brokers
+# brokers = ["localhost:9092"]
+# ## Kafka topic for producer messages
+# topic = "telegraf"
+#
+# ## The value of this tag will be used as the topic. If not set the 'topic'
+# ## option is used.
+# # topic_tag = ""
+#
+# ## If true, the 'topic_tag' will be removed from the metric.
+# # exclude_topic_tag = false
+#
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Of particular interest, lz4 compression
+# ## requires at least version 0.10.0.0.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
+# ## Optional topic suffix configuration.
+# ## If the section is omitted, no suffix is used.
+# ## Following topic suffix methods are supported:
+# ## measurement - suffix equals to separator + measurement's name
+# ## tags - suffix equals to separator + specified tags' values
+# ## interleaved with separator
+#
+# ## Suffix equals to "_" + measurement name
+# # [outputs.kafka.topic_suffix]
+# # method = "measurement"
+# # separator = "_"
+#
+# ## Suffix equals to "__" + measurement's "foo" tag value.
+# ## If there is no such tag, the suffix equals an empty string
+# # [outputs.kafka.topic_suffix]
+# # method = "tags"
+# # keys = ["foo"]
+# # separator = "__"
+#
+# ## Suffix equals to "_" + measurement's "foo" and "bar"
+# ## tag values, separated by "_". If there are no such tags,
+# ## their values are treated as empty strings.
+# # [outputs.kafka.topic_suffix]
+# # method = "tags"
+# # keys = ["foo", "bar"]
+# # separator = "_"
+#
+# ## The routing tag specifies a tagkey on the metric whose value is used as
+# ## the message key. The message key is used to determine which partition to
+# ## send the message to. This tag is preferred over the routing_key option.
+# routing_tag = "host"
+#
+# ## The routing key is set as the message key and used to determine which
+# ## partition to send the message to. This value is only used when no
+# ## routing_tag is set or as a fallback when the tag specified in routing tag
+# ## is not found.
+# ##
+# ## If set to "random", a random value will be generated for each message.
+# ##
+# ## When unset, no message key is added and each message is routed to a random
+# ## partition.
+# ##
+# ## ex: routing_key = "random"
+# ## routing_key = "telegraf"
+# # routing_key = ""
+#
+# ## CompressionCodec represents the various compression codecs recognized by
+# ## Kafka in messages.
+# ## 0 : No compression
+# ## 1 : Gzip compression
+# ## 2 : Snappy compression
+# ## 3 : LZ4 compression
+# # compression_codec = 0
+#
+# ## RequiredAcks is used in Produce Requests to tell the broker how many
+# ## replica acknowledgements it must see before responding
+# ## 0 : the producer never waits for an acknowledgement from the broker.
+# ## This option provides the lowest latency but the weakest durability
+# ## guarantees (some data will be lost when a server fails).
+# ## 1 : the producer gets an acknowledgement after the leader replica has
+# ## received the data. This option provides better durability as the
+# ## client waits until the server acknowledges the request as successful
+# ## (only messages that were written to the now-dead leader but not yet
+# ## replicated will be lost).
+# ## -1: the producer gets an acknowledgement after all in-sync replicas have
+# ## received the data. This option provides the best durability, we
+# ## guarantee that no messages will be lost as long as at least one in
+# ## sync replica remains.
+# # required_acks = -1
+#
+# ## The maximum number of times to retry sending a metric before failing
+# ## until the next flush.
+# # max_retry = 3
+#
+# ## The maximum permitted size of a message. Should be set equal to or
+# ## smaller than the broker's 'message.max.bytes'.
+# # max_message_bytes = 1000000
+#
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional SASL Config
+# # sasl_username = "kafka"
+# # sasl_password = "secret"
+#
+# ## SASL protocol version. When connecting to Azure EventHub set to 0.
+# # sasl_version = 1
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Configuration for the AWS Kinesis output.
+# [[outputs.kinesis]]
+# ## Amazon REGION of kinesis endpoint.
+# region = "ap-southeast-2"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Kinesis StreamName must exist prior to starting telegraf.
+# streamname = "StreamName"
+# ## DEPRECATED: PartitionKey as used for sharding data.
+# partitionkey = "PartitionKey"
+# ## DEPRECATED: If set the partitionKey will be a random UUID on every put.
+# ## This allows for scaling across multiple shards in a stream.
+# ## This will cause issues with ordering.
+# use_random_partitionkey = false
+# ## The partition key can be calculated using one of several methods:
+# ##
+# ## Use a static value for all writes:
+# # [outputs.kinesis.partition]
+# # method = "static"
+# # key = "howdy"
+# #
+# ## Use a random partition key on each write:
+# # [outputs.kinesis.partition]
+# # method = "random"
+# #
+# ## Use the measurement name as the partition key:
+# # [outputs.kinesis.partition]
+# # method = "measurement"
+# #
+# ## Use the value of a tag for all writes; if the tag is not set, the
+# ## 'default' value will be used. When no default is set, it defaults to "telegraf"
+# # [outputs.kinesis.partition]
+# # method = "tag"
+# # key = "host"
+# # default = "mykey"
+#
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+#
+# ## debug will show upstream aws messages.
+# debug = false
+
+
+# # Configuration for Librato API to send metrics to.
+# [[outputs.librato]]
+# ## Librato API Docs
+# ## http://dev.librato.com/v1/metrics-authentication
+# ## Librato API user
+# api_user = "telegraf@influxdb.com" # required.
+# ## Librato API token
+# api_token = "my-secret-token" # required.
+# ## Debug
+# # debug = false
+# ## Connection timeout.
+# # timeout = "5s"
+# ## Output source Template (same as graphite buckets)
+# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
+# ## This template is used in librato's source (not metric's name)
+# template = "host"
+#
+
+
+# # Configuration for MQTT server to send metrics to
+# [[outputs.mqtt]]
+# servers = ["localhost:1883"] # required.
+#
+# ## MQTT outputs send metrics to this topic format
+# ## "<topic_prefix>/<hostname>/<pluginname>"
+# ## ex: prefix/web01.example.com/mem
+# topic_prefix = "telegraf"
+#
+# ## QoS policy for messages
+# ## 0 = at most once
+# ## 1 = at least once
+# ## 2 = exactly once
+# # qos = 2
+#
+# ## username and password to connect MQTT server.
+# # username = "telegraf"
+# # password = "metricsmetricsmetricsmetrics"
+#
+# ## client ID, if not set a random ID is generated
+# # client_id = ""
+#
+# ## Timeout for write operations. default: 5s
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## When true, metrics will be sent in one MQTT message per flush. Otherwise,
+# ## metrics are written one metric per MQTT message.
+# # batch = false
+#
+# ## When true, metric will have RETAIN flag set, making broker cache entries until someone
+# ## actually reads it
+# # retain = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send telegraf measurements to NATS
+# [[outputs.nats]]
+# ## URLs of NATS servers
+# servers = ["nats://localhost:4222"]
+#
+# ## Optional credentials
+# # username = ""
+# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
+# ## NATS subject for producer messages
+# subject = "telegraf"
+#
+# ## Use Transport Layer Security
+# # secure = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send metrics to New Relic metrics endpoint
+# [[outputs.newrelic]]
+# ## New Relic Insights API key
+# insights_key = "insights api key"
+#
+# ## Prefix to add to metric name for easy identification.
+# # metric_prefix = ""
+#
+# ## Timeout for writes to the New Relic API.
+# # timeout = "15s"
+
+
+# # Send telegraf measurements to NSQD
+# [[outputs.nsq]]
+# ## Location of nsqd instance listening on TCP
+# server = "localhost:4150"
+# ## NSQ topic for producer messages
+# topic = "telegraf"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Configuration for OpenTSDB server to send metrics to
+# [[outputs.opentsdb]]
+# ## prefix for metrics keys
+# prefix = "my.specific.prefix."
+#
+# ## DNS name of the OpenTSDB server
+# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
+# ## telnet API. "http://opentsdb.example.com" will use the Http API.
+# host = "opentsdb.example.com"
+#
+# ## Port of the OpenTSDB server
+# port = 4242
+#
+# ## Number of data points to send to OpenTSDB in Http requests.
+# ## Not used with telnet API.
+# http_batch_size = 50
+#
+# ## URI Path for Http requests to OpenTSDB.
+# ## Used in cases where OpenTSDB is located behind a reverse proxy.
+# http_path = "/api/put"
+#
+# ## Debug true - Prints OpenTSDB communication
+# debug = false
+#
+# ## Separator separates measurement name from field
+# separator = "_"
+
+
+# # Configuration for the Prometheus client to spawn
+# [[outputs.prometheus_client]]
+# ## Address to listen on
+# listen = ":9273"
+#
+# ## Metric version controls the mapping from Telegraf metrics into
+# ## Prometheus format. When using the prometheus input, use the same value in
+# ## both plugins to ensure metrics are round-tripped without modification.
+# ##
+# ## example: metric_version = 1; deprecated in 1.13
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## Use HTTP Basic Authentication.
+# # basic_username = "Foo"
+# # basic_password = "Bar"
+#
+# ## If set, the IP Ranges which are allowed to access metrics.
+# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
+# # ip_range = []
+#
+# ## Path to publish the metrics on.
+# # path = "/metrics"
+#
+# ## Expiration interval for each metric. 0 == no expiration
+# # expiration_interval = "60s"
+#
+# ## Collectors to enable, valid entries are "gocollector" and "process".
+# ## If unset, both are enabled.
+# # collectors_exclude = ["gocollector", "process"]
+#
+# ## Send string metrics as Prometheus labels.
+# ## Unless set to false all string metrics will be sent as labels.
+# # string_as_label = true
+#
+# ## If set, enable TLS with the given certificate.
+# # tls_cert = "/etc/ssl/telegraf.crt"
+# # tls_key = "/etc/ssl/telegraf.key"
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Export metric collection time.
+# # export_timestamp = false
+
+
+# # Configuration for the Riemann server to send metrics to
+# [[outputs.riemann]]
+# ## The full TCP or UDP URL of the Riemann server
+# url = "tcp://localhost:5555"
+#
+# ## Riemann event TTL, floating-point time in seconds.
+# ## Defines how long an event is considered valid for in Riemann
+# # ttl = 30.0
+#
+# ## Separator to use between measurement and field name in Riemann service name
+# ## This does not have any effect if 'measurement_as_attribute' is set to 'true'
+# separator = "/"
+#
+# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
+# # measurement_as_attribute = false
+#
+# ## Send string metrics as Riemann event states.
+# ## Unless enabled all string metrics will be ignored
+# # string_as_state = false
+#
+# ## A list of tag keys whose values get sent as Riemann tags.
+# ## If empty, all Telegraf tag values will be sent as tags
+# # tag_keys = ["telegraf","custom_tag"]
+#
+# ## Additional Riemann tags to send.
+# # tags = ["telegraf-output"]
+#
+# ## Description for Riemann event
+# # description_text = "metrics collected from telegraf"
+#
+# ## Riemann client write timeout, defaults to "5s" if not set.
+# # timeout = "5s"
+
+
+# # Configuration for the Riemann server to send metrics to
+# [[outputs.riemann_legacy]]
+# ## URL of server
+# url = "localhost:5555"
+# ## transport protocol to use either tcp or udp
+# transport = "tcp"
+# ## separator to use between input name and field name in Riemann service name
+# separator = " "
+
+
+# # Generic socket writer capable of handling multiple socket types.
+# [[outputs.socket_writer]]
+# ## URL to connect to
+# # address = "tcp://127.0.0.1:8094"
+# # address = "tcp://example.com:http"
+# # address = "tcp4://127.0.0.1:8094"
+# # address = "tcp6://127.0.0.1:8094"
+# # address = "tcp6://[2001:db8::1]:8094"
+# # address = "udp://127.0.0.1:8094"
+# # address = "udp4://127.0.0.1:8094"
+# # address = "udp6://127.0.0.1:8094"
+# # address = "unix:///tmp/telegraf.sock"
+# # address = "unixgram:///tmp/telegraf.sock"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## Content encoding for packet-based connections (i.e. UDP, unixgram).
+# ## Can be set to "gzip" or to "identity" to apply no encoding.
+# ##
+# # content_encoding = "identity"
+#
+# ## Data format to generate.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+
+
+# # Configuration for Google Cloud Stackdriver to send metrics to
+# [[outputs.stackdriver]]
+# ## GCP Project
+# project = "erudite-bloom-151019"
+#
+# ## The namespace for the metric descriptor
+# namespace = "telegraf"
+#
+# ## Custom resource type
+# # resource_type = "generic_node"
+#
+# ## Additional resource labels
+# # [outputs.stackdriver.resource_labels]
+# # node_id = "$HOSTNAME"
+# # namespace = "myapp"
+# # location = "eu-north0"
+
+
+# # A plugin that can transmit metrics to Sumo Logic HTTP Source
+# [[outputs.sumologic]]
+# ## Unique URL generated for your HTTP Metrics Source.
+# ## This is the address to send metrics to.
+# # url = "https://events.sumologic.net/receiver/v1/http/"
+#
+# ## Data format to be used for sending metrics.
+# ## This will set the "Content-Type" header accordingly.
+# ## Currently supported formats:
+# ## * graphite - for Content-Type of application/vnd.sumologic.graphite
+# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2
+# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus
+# ##
+# ## More information can be found at:
+# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics
+# ##
+# ## NOTE:
+# ## When unset, telegraf will by default use the influx serializer which is currently unsupported
+# ## in HTTP Source.
+# data_format = "carbon2"
+#
+# ## Timeout used for HTTP request
+# # timeout = "5s"
+#
+# ## Max HTTP request body size in bytes before compression (if applied).
+# ## By default 1MB is recommended.
+# ## NOTE:
+# ## Bear in mind that in some serializer a metric even though serialized to multiple
+# ## lines cannot be split any further so setting this very low might not work
+# ## as expected.
+# # max_request_body_size = 1000000
+#
+# ## Additional, Sumo specific options.
+# ## Full list can be found here:
+# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers
+#
+# ## Desired source name.
+# ## Useful if you want to override the source name configured for the source.
+# # source_name = ""
+#
+# ## Desired host name.
+# ## Useful if you want to override the source host configured for the source.
+# # source_host = ""
+#
+# ## Desired source category.
+# ## Useful if you want to override the source category configured for the source.
+# # source_category = ""
+#
+# ## Comma-separated key=value list of dimensions to apply to every metric.
+# ## Custom dimensions will allow you to query your metrics at a more granular level.
+# # dimensions = ""
+
+
+# # Configuration for Syslog server to send metrics to
+# [[outputs.syslog]]
+# ## URL to connect to
+# ## ex: address = "tcp://127.0.0.1:8094"
+# ## ex: address = "tcp4://127.0.0.1:8094"
+# ## ex: address = "tcp6://127.0.0.1:8094"
+# ## ex: address = "tcp6://[2001:db8::1]:8094"
+# ## ex: address = "udp://127.0.0.1:8094"
+# ## ex: address = "udp4://127.0.0.1:8094"
+# ## ex: address = "udp6://127.0.0.1:8094"
+# address = "tcp://127.0.0.1:8094"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## The framing technique with which it is expected that messages are
+# ## transported (default = "octet-counting"). Whether the messages come
+# ## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
+# ## be one of "octet-counting", "non-transparent".
+# # framing = "octet-counting"
+#
+# ## The trailer to be expected in case of non-transparent framing (default = "LF").
+# ## Must be one of "LF", or "NUL".
+# # trailer = "LF"
+#
+# ## SD-PARAMs settings
+# ## Syslog messages can contain key/value pairs within zero or more
+# ## structured data sections. For each unrecognized metric tag/field a
+# ## SD-PARAMS is created.
+# ##
+# ## Example:
+# ## [[outputs.syslog]]
+# ## sdparam_separator = "_"
+# ## default_sdid = "default@32473"
+# ## sdids = ["foo@123", "bar@456"]
+# ##
+# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
+# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
+#
+# ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
+# # sdparam_separator = "_"
+#
+# ## Default sdid used for tags/fields that don't contain a prefix defined in
+# ## the explicit sdids setting below. If no default is specified, no SD-PARAMs
+# ## will be used for unrecognized fields.
+# # default_sdid = "default@32473"
+#
+# ## List of explicit prefixes to extract from tag/field keys and use as the
+# ## SDID, if they match (see above example for more details):
+# # sdids = ["foo@123", "bar@456"]
+#
+# ## Default severity value. Severity and Facility are used to calculate the
+# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field
+# ## with key "severity_code" is defined. If unset, 5 (notice) is the default
+# # default_severity_code = 5
+#
+# ## Default facility value. Facility and Severity are used to calculate the
+# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
+# ## key "facility_code" is defined. If unset, 1 (user-level) is the default
+# # default_facility_code = 1
+#
+# ## Default APP-NAME value (RFC5424#section-6.2.5)
+# ## Used when no metric tag with key "appname" is defined.
+# ## If unset, "Telegraf" is the default
+# # default_appname = "Telegraf"
+
+
+# # Configuration for Amazon Timestream output.
+# [[outputs.timestream]]
+# ## Amazon Region
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order:
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) Explicit credentials from 'access_key' and 'secret_key'
+# ## 3) Shared profile from 'profile'
+# ## 4) Environment variables
+# ## 5) Shared credentials file
+# ## 6) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Timestream database where the metrics will be inserted.
+# ## The database must exist prior to starting Telegraf.
+# database_name = "yourDatabaseNameHere"
+#
+# ## Specifies if the plugin should describe the Timestream database upon starting
+# ## to validate if it has access necessary permissions, connection, etc., as a safety check.
+# ## If the describe operation fails, the plugin will not start
+# ## and therefore the Telegraf agent will not start.
+# describe_database_on_start = false
+#
+# ## The mapping mode specifies how Telegraf records are represented in Timestream.
+# ## Valid values are: single-table, multi-table.
+# ## For example, consider the following data in line protocol format:
+# ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200
+# ## airquality,location=us-west no2=5,pm25=16 1465839830100400200
+# ## where weather and airquality are the measurement names, location and season are tags,
+# ## and temperature, humidity, no2, pm25 are fields.
+# ## In multi-table mode:
+# ## - first line will be ingested to table named weather
+# ## - second line will be ingested to table named airquality
+# ## - the tags will be represented as dimensions
+# ## - first table (weather) will have two records:
+# ## one with measurement name equals to temperature,
+# ## another with measurement name equals to humidity
+# ## - second table (airquality) will have two records:
+# ## one with measurement name equals to no2,
+# ## another with measurement name equals to pm25
+# ## - the Timestream tables from the example will look like this:
+# ## TABLE "weather":
+# ## time | location | season | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71
+# ## TABLE "airquality":
+# ## time | location | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-west | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | pm25 | 16
+# ## In single-table mode:
+# ## - the data will be ingested to a single table, which name will be valueOf(single_table_name)
+# ## - measurement name will be stored in dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name)
+# ## - location and season will be represented as dimensions
+# ## - temperature, humidity, no2, pm25 will be represented as measurement name
+# ## - the Timestream table from the example will look like this:
+# ## Assuming:
+# ## - single_table_name = "my_readings"
+# ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+# ## TABLE "my_readings":
+# ## time | location | season | namespace | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16
+# ## In most cases, using multi-table mapping mode is recommended.
+# ## However, you can consider using single-table in situations when you have thousands of measurement names.
+# mapping_mode = "multi-table"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Specifies the Timestream table where the metrics will be uploaded.
+# # single_table_name = "yourTableNameHere"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Describes what will be the Timestream dimension name for the Telegraf
+# ## measurement name.
+# # single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+#
+# ## Specifies if the plugin should create the table, if the table does not exist.
+# ## The plugin writes the data without prior checking if the table exists.
+# ## When the table does not exist, the error returned from Timestream will cause
+# ## the plugin to create the table, if this parameter is set to true.
+# create_table_if_not_exists = true
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table magnetic store retention period in days.
+# ## Check Timestream documentation for more details.
+# create_table_magnetic_store_retention_period_in_days = 365
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table memory store retention period in hours.
+# ## Check Timestream documentation for more details.
+# create_table_memory_store_retention_period_in_hours = 24
+#
+# ## Only valid and optional if create_table_if_not_exists = true
+# ## Specifies the Timestream table tags.
+# ## Check Timestream documentation for more details
+# # create_table_tags = { "foo" = "bar", "environment" = "dev"}
+
+
+# # Write metrics to Warp 10
+# [[outputs.warp10]]
+# # Prefix to add to the measurement.
+# prefix = "telegraf."
+#
+# # URL of the Warp 10 server
+# warp_url = "http://localhost:8080"
+#
+# # Write token to access your app on warp 10
+# token = "Token"
+#
+# # Warp 10 query timeout
+# # timeout = "15s"
+#
+# ## Print Warp 10 error body
+# # print_error_body = false
+#
+# ## Max string error size
+# # max_string_error_size = 511
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Configuration for Wavefront server to send metrics to
+# [[outputs.wavefront]]
+# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy
+# ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878
+# url = "https://metrics.wavefront.com"
+#
+# ## Authentication Token for Wavefront. Only required if using Direct Ingestion
+# #token = "DUMMY_TOKEN"
+#
+# ## DNS name of the wavefront proxy server. Do not use if url is specified
+# #host = "wavefront.example.com"
+#
+# ## Port that the Wavefront proxy server listens on. Do not use if url is specified
+# #port = 2878
+#
+# ## prefix for metrics keys
+# #prefix = "my.specific.prefix."
+#
+# ## whether to use "value" for name of simple fields. default is false
+# #simple_fields = false
+#
+# ## character to use between metric and field name. default is . (dot)
+# #metric_separator = "."
+#
+# ## Convert metric name paths to use metricSeparator character
+# ## When true will convert all _ (underscore) characters in final metric name. default is true
+# #convert_paths = true
+#
+# ## Use Strict rules to sanitize metric and tag names from invalid characters
+# ## When enabled forward slash (/) and comma (,) will be accepted
+# #use_strict = false
+#
+# ## Use Regex to sanitize metric and tag names from invalid characters
+# ## Regex is more thorough, but significantly slower. default is false
+# #use_regex = false
+#
+# ## point tags to use as the source name for Wavefront (if none found, host will be used)
+# #source_override = ["hostname", "address", "agent_host", "node_host"]
+#
+# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
+# #convert_bool = true
+#
+# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any
+# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility.
+# #truncate_tags = false
+#
+# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
+# ## deprecated in 1.9; use the enum processor plugin
+# #[[outputs.wavefront.string_to_number.elasticsearch]]
+# # green = 1.0
+# # yellow = 0.5
+# # red = 0.0
+
+
+###############################################################################
+# PROCESSOR PLUGINS #
+###############################################################################
+
+
+# # Clone metrics and apply modifications.
+# [[processors.clone]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.clone.tags]
+# # additional_tag = "tag_value"
+
+
+# # Convert values to another metric value type
+# [[processors.converter]]
+# ## Tags to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## = [...]
+# [processors.converter.tags]
+# measurement = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+#
+# ## Fields to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## = [...]
+# [processors.converter.fields]
+# measurement = []
+# tag = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+
+
+# # Dates measurements, tags, and fields that pass through this filter.
+# [[processors.date]]
+# ## New tag to create
+# tag_key = "month"
+#
+# ## New field to create (cannot set both field_key and tag_key)
+# # field_key = "month"
+#
+# ## Date format string, must be a representation of the Go "reference time"
+# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
+# date_format = "Jan"
+#
+# ## If destination is a field, date format can also be one of
+# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
+# # date_format = "unix"
+#
+# ## Offset duration added to the date string when writing the new tag.
+# # date_offset = "0s"
+#
+# ## Timezone to use when creating the tag or field using a reference time
+# ## string. This can be set to one of "UTC", "Local", or to a location name
+# ## in the IANA Time Zone database.
+# ## example: timezone = "America/Los_Angeles"
+# # timezone = "UTC"
+
+
+# # Filter metrics with repeating field values
+# [[processors.dedup]]
+# ## Maximum time to suppress output
+# dedup_interval = "600s"
+
+
+# # Defaults sets default value(s) for specified fields that are not set on incoming metrics.
+# [[processors.defaults]]
+# ## Ensures a set of fields always exists on your metric(s) with their
+# ## respective default value.
+# ## For any given field pair (key = default), if it's not set, a field
+# ## is set on the metric with the specified default.
+# ##
+# ## A field is considered not set if it is nil on the incoming metric;
+# ## or it is not nil but its value is an empty string or is a string
+# ## of one or more spaces.
+# ## <field-name> = <default-value>
+# # [processors.defaults.fields]
+# # field_1 = "bar"
+# # time_idle = 0
+# # is_error = true
+
+
+# # Map enum values according to given table.
+# [[processors.enum]]
+# [[processors.enum.mapping]]
+# ## Name of the field to map
+# field = "status"
+#
+# ## Name of the tag to map
+# # tag = "status"
+#
+# ## Destination tag or field to be used for the mapped value. By default the
+# ## source tag or field is used, overwriting the original value.
+# dest = "status_code"
+#
+# ## Default value to be used for all values not contained in the mapping
+# ## table. When unset, the unmodified value for the field will be used if no
+# ## match is found.
+# # default = 0
+#
+# ## Table of mappings
+# [processors.enum.mapping.value_mappings]
+# green = 1
+# amber = 2
+# red = 3
+
+
+# # Run executable as long-running processor plugin
+# [[processors.execd]]
+# ## Program to run as daemon
+# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
+# command = ["cat"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+
+
+# # Performs file path manipulations on tags and fields
+# [[processors.filepath]]
+# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+# # [[processors.filepath.basename]]
+# # tag = "path"
+# # dest = "basepath"
+#
+# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+# # [[processors.filepath.dirname]]
+# # field = "path"
+#
+# ## Treat the tag value as a path, converting it to the last element without its suffix
+# # [[processors.filepath.stem]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to the shortest path name equivalent
+# ## to path by purely lexical processing
+# # [[processors.filepath.clean]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to a relative path that is lexically
+# ## equivalent to the source path when joined to 'base_path'
+# # [[processors.filepath.rel]]
+# # tag = "path"
+# # base_path = "/var/log"
+#
+# ## Treat the tag value as a path, replacing each separator character in path with a '/' character. Has only
+# ## effect on Windows
+# # [[processors.filepath.toslash]]
+# # tag = "path"
+
+
+# # Add a tag of the network interface name looked up over SNMP by interface number
+# [[processors.ifname]]
+# ## Name of tag holding the interface number
+# # tag = "ifIndex"
+#
+# ## Name of output tag where service name will be added
+# # dest = "ifName"
+#
+# ## Name of tag of the SNMP agent to request the interface name from
+# # agent = "agent"
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## max_parallel_lookups is the maximum number of SNMP requests to
+# ## make at the same time.
+# # max_parallel_lookups = 100
+#
+# ## ordered controls whether or not the metrics need to stay in the
+# ## same order this plugin received them in. If false, this plugin
+# ## may change the order when data is cached. If you need metrics to
+# ## stay in order set this to true. keeping the metrics ordered may
+# ## be slightly slower
+# # ordered = false
+#
+# ## cache_ttl is the amount of time interface names are cached for a
+# ## given agent. After this period elapses if names are needed they
+# ## will be retrieved again.
+# # cache_ttl = "8h"
+
+
+# # Apply metric modifications using override semantics.
+# [[processors.override]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.override.tags]
+# # additional_tag = "tag_value"
+
+
+# # Parse a value in a specified field/tag(s) and add the result in a new metric
+# [[processors.parser]]
+# ## The name of the fields whose value will be parsed.
+# parse_fields = []
+#
+# ## If true, incoming metrics are not emitted.
+# drop_original = false
+#
+# ## If set to override, emitted metrics will be merged by overriding the
+# ## original metric using the newly parsed metrics.
+# merge = "override"
+#
+# ## The dataformat to be read from files
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Rotate a single valued metric into a multi field metric
+# [[processors.pivot]]
+# ## Tag to use for naming the new field.
+# tag_key = "name"
+# ## Field to use as the value of the new field.
+# value_key = "value"
+
+
+# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file
+# [[processors.port_name]]
+# ## Name of tag holding the port number
+# # tag = "port"
+# ## Or name of the field holding the port number
+# # field = "port"
+#
+# ## Name of output tag or field (depending on the source) where service name will be added
+# # dest = "service"
+#
+# ## Default tcp or udp
+# # default_protocol = "tcp"
+#
+# ## Tag containing the protocol (tcp or udp, case-insensitive)
+# # protocol_tag = "proto"
+#
+# ## Field containing the protocol (tcp or udp, case-insensitive)
+# # protocol_field = "proto"
+
+
+# # Print all metrics that pass through this filter.
+# [[processors.printer]]
+
+
+# # Transforms tag and field values with regex pattern
+# [[processors.regex]]
+# ## Tag and field conversions defined in a separate sub-tables
+# # [[processors.regex.tags]]
+# # ## Tag to change
+# # key = "resp_code"
+# # ## Regular expression to match on a tag value
+# # pattern = "^(\\d)\\d\\d$"
+# # ## Matches of the pattern will be replaced with this string. Use ${1}
+# # ## notation to use the text of the first submatch.
+# # replacement = "${1}xx"
+#
+# # [[processors.regex.fields]]
+# # ## Field to change
+# # key = "request"
+# # ## All the power of the Go regular expressions available here
+# # ## For example, named subgroups
+# # pattern = "^/api(?P<method>/[\\w/]+)\\S*"
+# # replacement = "${method}"
+# # ## If result_key is present, a new field will be created
+# # ## instead of changing existing field
+# # result_key = "method"
+#
+# ## Multiple conversions may be applied for one field sequentially
+# ## Let's extract one more value
+# # [[processors.regex.fields]]
+# # key = "request"
+# # pattern = ".*category=(\\w+).*"
+# # replacement = "${1}"
+# # result_key = "search_category"
+
+
+# # Rename measurements, tags, and fields that pass through this filter.
+# [[processors.rename]]
+
+
+# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
+# [[processors.reverse_dns]]
+# ## For optimal performance, you may want to limit which metrics are passed to this
+# ## processor. eg:
+# ## namepass = ["my_metric_*"]
+#
+# ## cache_ttl is how long the dns entries should stay cached for.
+# ## generally longer is better, but if you expect a large number of diverse lookups
+# ## you'll want to consider memory use.
+# cache_ttl = "24h"
+#
+# ## lookup_timeout is how long you should wait for a single dns request to respond.
+# ## this is also the maximum acceptable latency for a metric travelling through
+# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+# ## be passed on unaltered.
+# ## multiple simultaneous resolution requests for the same IP will only make a
+# ## single rDNS request, and they will all wait for the answer for this long.
+# lookup_timeout = "3s"
+#
+# ## max_parallel_lookups is the maximum number of dns requests to be in flight
+# ## at the same time. Requests hitting cached values do not count against this
+# ## total, and neither do multiple requests for the same IP.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_lookups = 10
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the field "source_ip", and put the result in the field "source_name"
+# field = "source_ip"
+# dest = "source_name"
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the tag "destination_ip", and put the result in the tag
+# ## "destination_name".
+# tag = "destination_ip"
+# dest = "destination_name"
+#
+# ## If you would prefer destination_name to be a field instead, you can use a
+# ## processors.converter after this one, specifying the order attribute.
+
+
+# # Add the S2 Cell ID as a tag based on latitude and longitude fields
+# [[processors.s2geo]]
+# ## The name of the lat and lon fields containing WGS-84 latitude and
+# ## longitude in decimal degrees.
+# # lat_field = "lat"
+# # lon_field = "lon"
+#
+# ## New tag to create
+# # tag_key = "s2_cell_id"
+#
+# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+# # cell_level = 9
+
+
+# # Process metrics using a Starlark script
+# [[processors.starlark]]
+# ## The Starlark source can be set as a string in this configuration file, or
+# ## by referencing a file containing the script. Only one source or script
+# ## should be set at once.
+# ##
+# ## Source of the Starlark script.
+# source = '''
+# def apply(metric):
+# return metric
+# '''
+#
+# ## File containing a Starlark script.
+# # script = "/usr/local/bin/myscript.star"
+
+
+# # Perform string processing on tags, fields, and measurements
+# [[processors.strings]]
+# ## Convert a tag value to uppercase
+# # [[processors.strings.uppercase]]
+# # tag = "method"
+#
+# ## Convert a field value to lowercase and store in a new field
+# # [[processors.strings.lowercase]]
+# # field = "uri_stem"
+# # dest = "uri_stem_normalised"
+#
+# ## Convert a field value to titlecase
+# # [[processors.strings.titlecase]]
+# # field = "status"
+#
+# ## Trim leading and trailing whitespace using the default cutset
+# # [[processors.strings.trim]]
+# # field = "message"
+#
+# ## Trim leading characters in cutset
+# # [[processors.strings.trim_left]]
+# # field = "message"
+# # cutset = "\t"
+#
+# ## Trim trailing characters in cutset
+# # [[processors.strings.trim_right]]
+# # field = "message"
+# # cutset = "\r\n"
+#
+# ## Trim the given prefix from the field
+# # [[processors.strings.trim_prefix]]
+# # field = "my_value"
+# # prefix = "my_"
+#
+# ## Trim the given suffix from the field
+# # [[processors.strings.trim_suffix]]
+# # field = "read_count"
+# # suffix = "_count"
+#
+# ## Replace all non-overlapping instances of old with new
+# # [[processors.strings.replace]]
+# # measurement = "*"
+# # old = ":"
+# # new = "_"
+#
+# ## Trims strings based on width
+# # [[processors.strings.left]]
+# # field = "message"
+# # width = 10
+#
+# ## Decode a base64 encoded utf-8 string
+# # [[processors.strings.base64decode]]
+# # field = "message"
+
+
+# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
+# [[processors.tag_limit]]
+# ## Maximum number of tags to preserve
+# limit = 10
+#
+# ## List of tags to preferentially preserve
+# keep = ["foo", "bar", "baz"]
+
+
+# # Uses a Go template to create a new tag
+# [[processors.template]]
+# ## Tag to set with the output of the template.
+# tag = "topic"
+#
+# ## Go template used to create the tag value. In order to ease TOML
+# ## escaping requirements, you may wish to use single quotes around the
+# ## template string.
+# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
+
+
+# # Keep only the metrics with the top k values of the configured fields.
+# [[processors.topk]]
+# ## How many seconds between aggregations
+# # period = 10
+#
+# ## How many top metrics to return
+# # k = 10
+#
+# ## Over which tags should the aggregation be done. Globs can be specified, in
+# ## which case any tag matching the glob will be aggregated over. If set to an
+# ## empty list, no aggregation over tags is done
+# # group_by = ['*']
+#
+# ## Over which fields are the top k are calculated
+# # fields = ["value"]
+#
+# ## What aggregation to use. Options: sum, mean, min, max
+# # aggregation = "mean"
+#
+# ## Instead of the top k largest metrics, return the bottom k lowest metrics
+# # bottomk = false
+#
+# ## The plugin assigns each metric a GroupBy tag generated from its name and
+# ## tags. If this setting is different than "" the plugin will add a
+# ## tag (which name will be the value of this setting) to each metric with
+# ## the value of the calculated GroupBy tag. Useful for debugging
+# # add_groupby_tag = ""
+#
+# ## These settings provide a way to know the position of each metric in
+# ## the top k. The 'add_rank_field' setting allows to specify for which
+# ## fields the position is required. If the list is non empty, then a field
+# ## will be added to each and every metric for each string present in this
+# ## setting. This field will contain the ranking of the group that
+# ## the metric belonged to when aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_rank'
+# # add_rank_fields = []
+#
+# ## These settings provide a way to know what values the plugin is generating
+# ## when aggregating metrics. The 'add_aggregate_field' setting allows to
+# ## specify for which fields the final aggregation value is required. If the
+# ## list is non empty, then a field will be added to each and every metric for
+# ## each field present in this setting. This field will contain
+# ## the computed aggregation for the group that the metric belonged to when
+# ## aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_aggregate'
+# # add_aggregate_fields = []
+
+
+# # Rotate multi field metric into several single field metrics
+# [[processors.unpivot]]
+# ## Tag to use for the name.
+# tag_key = "name"
+# ## Field to use for the name of the value.
+# value_key = "value"
+
+
+###############################################################################
+# AGGREGATOR PLUGINS #
+###############################################################################
+
+
+# # Keep the aggregate basicstats of each metric passing through.
+# [[aggregators.basicstats]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## Configures which basic stats to push as fields
+# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
+
+
+# # Report the final metric of a series
+# [[aggregators.final]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## The time that a series is not updated until considering it final.
+# series_timeout = "5m"
+
+
+# # Create aggregate histograms.
+# [[aggregators.histogram]]
+# ## The period in which to flush the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## If true, the histogram will be reset on flush instead
+# ## of accumulating the results.
+# reset = false
+#
+# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added.
+# ## Defaults to true.
+# cumulative = true
+#
+# ## Example config that aggregates all fields of the metric.
+# # [[aggregators.histogram.config]]
+# # ## Right borders of buckets (with +Inf implicitly added).
+# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
+# # ## The name of the metric.
+# # measurement_name = "cpu"
+#
+# ## Example config that aggregates only specific fields of the metric.
+# # [[aggregators.histogram.config]]
+# # ## Right borders of buckets (with +Inf implicitly added).
+# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+# # ## The name of the metric.
+# # measurement_name = "diskio"
+# # ## The concrete fields of metric
+# # fields = ["io_time", "read_time", "write_time"]
+
+
+# # Merge metrics into multifield metrics by series key
+# [[aggregators.merge]]
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = true
+
+
+# # Keep the aggregate min/max of each metric passing through.
+# [[aggregators.minmax]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+
+
+# # Count the occurrence of values in fields.
+# [[aggregators.valuecounter]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+# ## The fields for which the values will be counted
+# fields = []
+
+
+###############################################################################
+# INPUT PLUGINS #
+###############################################################################
+
+
+# Read metrics about cpu usage
+[[inputs.cpu]]
+ ## Whether to report per-cpu stats or not
+ percpu = true
+ ## Whether to report total system cpu stats or not
+ totalcpu = true
+ ## If true, collect raw CPU time metrics.
+ collect_cpu_time = false
+ ## If true, compute and report the sum of all non-idle CPU states.
+ report_active = false
+
+
+# Read metrics about disk usage by mount point
+[[inputs.disk]]
+ ## By default stats will be gathered for all mount points.
+ ## Set mount_points will restrict the stats to only the specified mount points.
+ # mount_points = ["/"]
+
+ ## Ignore mount points by filesystem type.
+ ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
+
+
+# Read metrics about disk IO by device
+[[inputs.diskio]]
+ ## By default, telegraf will gather stats for all devices including
+ ## disk partitions.
+ ## Setting devices will restrict the stats to the specified devices.
+ # devices = ["sda", "sdb", "vd*"]
+ ## Uncomment the following line if you need disk serial numbers.
+ # skip_serial_number = false
+ #
+ ## On systems which support it, device metadata can be added in the form of
+ ## tags.
+ ## Currently only Linux is supported via udev properties. You can view
+ ## available properties for a device by running:
+ ## 'udevadm info -q property -n /dev/sda'
+ ## Note: Most, but not all, udev properties can be accessed this way. Properties
+ ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
+ # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
+ #
+ ## Using the same metadata source as device_tags, you can also customize the
+ ## name of the device via templates.
+ ## The 'name_templates' parameter is a list of templates to try and apply to
+ ## the device. The template may contain variables in the form of '$PROPERTY' or
+ ## '${PROPERTY}'. The first template which does not contain any variables not
+ ## present for the device is used as the device name tag.
+ ## The typical use case is for LVM volumes, to get the VG/LV name instead of
+ ## the near-meaningless DM-0 name.
+ # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
+
+
+# Get kernel statistics from /proc/stat
+[[inputs.kernel]]
+ # no configuration
+
+
+# Read metrics about memory usage
+[[inputs.mem]]
+ # no configuration
+
+
+# Get the number of processes and group them by status
+[[inputs.processes]]
+ # no configuration
+
+
+# Read metrics about swap memory usage
+[[inputs.swap]]
+ # no configuration
+
+
+# Read metrics about system load & uptime
+[[inputs.system]]
+ ## Uncomment to remove deprecated metrics.
+ # fielddrop = ["uptime_format"]
+
+
+# # Gather ActiveMQ metrics
+# [[inputs.activemq]]
+# ## ActiveMQ WebConsole URL
+# url = "http://127.0.0.1:8161"
+#
+# ## Required ActiveMQ Endpoint
+# ## deprecated in 1.11; use the url option
+# # server = "127.0.0.1"
+# # port = 8161
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Required ActiveMQ webadmin root path
+# # webadmin = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read stats from aerospike server(s)
+# [[inputs.aerospike]]
+# ## Aerospike servers to connect to (with port)
+# ## This plugin will query all namespaces the aerospike
+# ## server has configured and get stats for them.
+# servers = ["localhost:3000"]
+#
+# # username = "telegraf"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # enable_tls = false
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## If false, skip chain & host verification
+# # insecure_skip_verify = true
+#
+# # Feature Options
+# # Add namespace variable to limit the namespaces executed on
+# # Leave blank to do all
+# # disable_query_namespaces = true # default false
+# # namespaces = ["namespace1", "namespace2"]
+#
+# # Enable set level telemetry
+# # query_sets = true # default: false
+# # Add namespace set combinations to limit sets executed on
+# # Leave blank to do all sets
+# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
+#
+# # Histograms
+# # enable_ttl_histogram = true # default: false
+# # enable_object_size_linear_histogram = true # default: false
+#
+# # by default, aerospike produces a 100 bucket histogram
+# # this is not great for most graphing tools, this will allow
+# # the ability to squash this to a smaller number of buckets
+# # num_histogram_buckets = 100 # default: 10
+
+
+# # Read Apache status information (mod_status)
+# [[inputs.apache]]
+# ## An array of URLs to gather from, must be directed at the machine
+# ## readable version of the mod_status page including the auto query string.
+# ## Default is "http://localhost/server-status?auto".
+# urls = ["http://localhost/server-status?auto"]
+#
+# ## Credentials for basic HTTP authentication.
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Monitor APC UPSes connected to apcupsd
+# [[inputs.apcupsd]]
+# # A list of running apcupsd server to connect to.
+# # If not provided will default to tcp://127.0.0.1:3551
+# servers = ["tcp://127.0.0.1:3551"]
+#
+# ## Timeout for dialing server.
+# timeout = "5s"
+
+
+# # Gather metrics from Apache Aurora schedulers
+# [[inputs.aurora]]
+# ## Schedulers are the base addresses of your Aurora Schedulers
+# schedulers = ["http://127.0.0.1:8081"]
+#
+# ## Set of role types to collect metrics from.
+# ##
+# ## The scheduler roles are checked each interval by contacting the
+# ## scheduler nodes; zookeeper is not contacted.
+# # roles = ["leader", "follower"]
+#
+# ## Timeout is the max time for total network operations.
+# # timeout = "5s"
+#
+# ## Username and password are sent using HTTP Basic Auth.
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Gather Azure Storage Queue metrics
+# [[inputs.azure_storage_queue]]
+# ## Required Azure Storage Account name
+# account_name = "mystorageaccount"
+#
+# ## Required Azure Storage Account access key
+# account_key = "storageaccountaccesskey"
+#
+# ## Set to false to disable peeking age of oldest message (executes faster)
+# # peek_oldest_message_age = true
+
+
+# # Read metrics of bcache from stats_total and dirty_data
+# [[inputs.bcache]]
+# ## Bcache sets path
+# ## If not specified, then default is:
+# bcachePath = "/sys/fs/bcache"
+#
+# ## By default, telegraf gather stats for all bcache devices
+# ## Setting devices will restrict the stats to the specified
+# ## bcache devices.
+# bcacheDevs = ["bcache0"]
+
+
+# # Collects Beanstalkd server and tubes stats
+# [[inputs.beanstalkd]]
+# ## Server to collect data from
+# server = "localhost:11300"
+#
+# ## List of tubes to gather stats about.
+# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command
+# tubes = ["notifications"]
+
+
+# # Read BIND nameserver XML statistics
+# [[inputs.bind]]
+# ## An array of BIND XML statistics URI to gather stats.
+# ## Default is "http://localhost:8053/xml/v3".
+# # urls = ["http://localhost:8053/xml/v3"]
+# # gather_memory_contexts = false
+# # gather_views = false
+
+
+# # Collect bond interface status, slaves statuses and failures count
+# [[inputs.bond]]
+# ## Sets 'proc' directory path
+# ## If not specified, then default is /proc
+# # host_proc = "/proc"
+#
+# ## By default, telegraf gather stats for all bond interfaces
+# ## Setting interfaces will restrict the stats to the specified
+# ## bond interfaces.
+# # bond_interfaces = ["bond0"]
+
+
+# # Collect Kafka topics and consumers status from Burrow HTTP API.
+# [[inputs.burrow]]
+# ## Burrow API endpoints in format "schema://host:port".
+# ## Default is "http://localhost:8000".
+# servers = ["http://localhost:8000"]
+#
+# ## Override Burrow API prefix.
+# ## Useful when Burrow is behind reverse-proxy.
+# # api_prefix = "/v3/kafka"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Limit per-server concurrent connections.
+# ## Useful in case of large number of topics or consumer groups.
+# # concurrent_connections = 20
+#
+# ## Filter clusters, default is no filtering.
+# ## Values can be specified as glob patterns.
+# # clusters_include = []
+# # clusters_exclude = []
+#
+# ## Filter consumer groups, default is no filtering.
+# ## Values can be specified as glob patterns.
+# # groups_include = []
+# # groups_exclude = []
+#
+# ## Filter topics, default is no filtering.
+# ## Values can be specified as glob patterns.
+# # topics_include = []
+# # topics_exclude = []
+#
+# ## Credentials for basic HTTP authentication.
+# # username = ""
+# # password = ""
+#
+# ## Optional SSL config
+# # ssl_ca = "/etc/telegraf/ca.pem"
+# # ssl_cert = "/etc/telegraf/cert.pem"
+# # ssl_key = "/etc/telegraf/key.pem"
+# # insecure_skip_verify = false
+
+
+# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster.
+# [[inputs.ceph]]
+# ## This is the recommended interval to poll. Too frequent and you will lose
+# ## data points due to timeouts during rebalancing and recovery
+# interval = '1m'
+#
+# ## All configuration values are optional, defaults are shown below
+#
+# ## location of ceph binary
+# ceph_binary = "/usr/bin/ceph"
+#
+# ## directory in which to look for socket files
+# socket_dir = "/var/run/ceph"
+#
+# ## prefix of MON and OSD socket files, used to determine socket type
+# mon_prefix = "ceph-mon"
+# osd_prefix = "ceph-osd"
+# mds_prefix = "ceph-mds"
+# rgw_prefix = "ceph-client"
+#
+# ## suffix used to identify socket files
+# socket_suffix = "asok"
+#
+# ## Ceph user to authenticate as
+# ceph_user = "client.admin"
+#
+# ## Ceph configuration to use to locate the cluster
+# ceph_config = "/etc/ceph/ceph.conf"
+#
+# ## Whether to gather statistics via the admin socket
+# gather_admin_socket_stats = true
+#
+# ## Whether to gather statistics via ceph commands
+# gather_cluster_stats = false
+
+
+# # Read specific statistics per cgroup
+# [[inputs.cgroup]]
+# ## Directories in which to look for files, globs are supported.
+# ## Consider restricting paths to the set of cgroups you really
+# ## want to monitor if you have a large number of cgroups, to avoid
+# ## any cardinality issues.
+# # paths = [
+# # "/sys/fs/cgroup/memory",
+# # "/sys/fs/cgroup/memory/child1",
+# # "/sys/fs/cgroup/memory/child2/*",
+# # ]
+# ## cgroup stat fields, as file names, globs are supported.
+# ## these file names are appended to each path from above.
+# # files = ["memory.*usage*", "memory.limit_in_bytes"]
+
+
+# # Get standard chrony metrics, requires chronyc executable.
+# [[inputs.chrony]]
+# ## If true, chronyc tries to perform a DNS lookup for the time server.
+# # dns_lookup = false
+
+
+# # Pull Metric Statistics from Amazon CloudWatch
+# [[inputs.cloudwatch]]
+# ## Amazon Region
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
+# # access_key = ""
+# # secret_key = ""
+# # token = ""
+# # role_arn = ""
+# # profile = ""
+# # shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
+# # metrics are made available to the 1 minute period. Some are collected at
+# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
+# # Note that if a period is configured that is smaller than the minimum for a
+# # particular metric, that metric will not be returned by the Cloudwatch API
+# # and will not be collected by Telegraf.
+# #
+# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
+# period = "5m"
+#
+# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
+# delay = "5m"
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
+# ## gaps or overlap in pulled data
+# interval = "5m"
+#
+# ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored.
+# ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours.
+# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain.
+# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old.
+# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html
+# #recently_active = "PT3H"
+#
+# ## Configure the TTL for the internal cache of metrics.
+# # cache_ttl = "1h"
+#
+# ## Metric Statistic Namespace (required)
+# namespace = "AWS/ELB"
+#
+# ## Maximum requests per second. Note that the global default AWS rate limit is
+# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
+# ## maximum of 50.
+# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
+# # ratelimit = 25
+#
+# ## Timeout for http requests made by the cloudwatch client.
+# # timeout = "5s"
+#
+# ## Namespace-wide statistic filters. These allow fewer queries to be made to
+# ## cloudwatch.
+# # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+# # statistic_exclude = []
+#
+# ## Metrics to Pull
+# ## Defaults to all Metrics in Namespace if nothing is provided
+# ## Refreshes Namespace available metrics every 1h
+# #[[inputs.cloudwatch.metrics]]
+# # names = ["Latency", "RequestCount"]
+# #
+# # ## Statistic filters for Metric. These allow for retrieving specific
+# # ## statistics for an individual metric.
+# # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+# # # statistic_exclude = []
+# #
+# # ## Dimension filters for Metric. All dimensions defined for the metric names
+# # ## must be specified in order to retrieve the metric statistics.
+# # [[inputs.cloudwatch.metrics.dimensions]]
+# # name = "LoadBalancerName"
+# # value = "p-example"
+
+
+# # Collects conntrack stats from the configured directories and files.
+# [[inputs.conntrack]]
+# ## The following defaults would work with multiple versions of conntrack.
+# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
+# ## kernel versions, as are the directory locations.
+#
+# ## Superset of filenames to look for within the conntrack dirs.
+# ## Missing files will be ignored.
+# files = ["ip_conntrack_count","ip_conntrack_max",
+# "nf_conntrack_count","nf_conntrack_max"]
+#
+# ## Directories to search within for the conntrack files above.
+# ## Missing directories will be ignored.
+# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
+
+
+# # Gather health check statuses from services registered in Consul
+# [[inputs.consul]]
+# ## Consul server address
+# # address = "localhost:8500"
+#
+# ## URI scheme for the Consul server, one of "http", "https"
+# # scheme = "http"
+#
+# ## Metric version controls the mapping from Consul metrics into
+# ## Telegraf metrics.
+# ##
+# ## example: metric_version = 1; deprecated in 1.15
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## ACL token used in every request
+# # token = ""
+#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
+# ## Data center to query the health checks from
+# # datacenter = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+#
+# ## Consul checks' tag splitting
+# # When tags are formatted like "key:value" with ":" as a delimiter then
+# # they will be split and reported as proper key:value pairs in Telegraf
+# # tag_delimiter = ":"
+
+
+# # Read metrics from one or many couchbase clusters
+# [[inputs.couchbase]]
+# ## specify servers via a url matching:
+# ## [protocol://][:password]@address[:port]
+# ## e.g.
+# ## http://couchbase-0.example.com/
+# ## http://admin:secret@couchbase-0.example.com:8091/
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no protocol is specified, HTTP is used.
+# ## If no port is specified, 8091 is used.
+# servers = ["http://localhost:8091"]
+
+
+# # Read CouchDB Stats from one or more servers
+# [[inputs.couchdb]]
+# ## Works with CouchDB stats endpoints out of the box
+# ## Multiple Hosts from which to read CouchDB stats:
+# hosts = ["http://localhost:8086/_stats"]
+#
+# ## Use HTTP Basic Authentication.
+# # basic_username = "telegraf"
+# # basic_password = "p@ssw0rd"
+
+
+# # Input plugin for DC/OS metrics
+# [[inputs.dcos]]
+# ## The DC/OS cluster URL.
+# cluster_url = "https://dcos-ee-master-1"
+#
+# ## The ID of the service account.
+# service_account_id = "telegraf"
+# ## The private key file for the service account.
+# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
+#
+# ## Path containing login token. If set, will read on every gather.
+# # token_file = "/home/dcos/.dcos/token"
+#
+# ## In all filter options if both include and exclude are empty all items
+# ## will be collected. Arrays may contain glob patterns.
+# ##
+# ## Node IDs to collect metrics from. If a node is excluded, no metrics will
+# ## be collected for its containers or apps.
+# # node_include = []
+# # node_exclude = []
+# ## Container IDs to collect container metrics from.
+# # container_include = []
+# # container_exclude = []
+# ## Container IDs to collect app metrics from.
+# # app_include = []
+# # app_exclude = []
+#
+# ## Maximum concurrent connections to the cluster.
+# # max_connections = 10
+# ## Maximum time to receive a response from cluster.
+# # response_timeout = "20s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## If false, skip chain & host verification
+# # insecure_skip_verify = true
+#
+# ## Recommended filtering to reduce series cardinality.
+# # [inputs.dcos.tagdrop]
+# # path = ["/var/lib/mesos/slave/slaves/*"]
+
+
+# # Read metrics from one or many disque servers
+# [[inputs.disque]]
+# ## An array of URI to gather stats about. Specify an ip or hostname
+# ## with optional port and password.
+# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["localhost"]
+
+
+# # Provide a native collection for dmsetup based statistics for dm-cache
+# [[inputs.dmcache]]
+# ## Whether to report per-device stats or not
+# per_device = true
+
+
+# # Query given DNS server and gives statistics
+# [[inputs.dns_query]]
+# ## servers to query
+# servers = ["8.8.8.8"]
+#
+# ## Network is the network protocol name.
+# # network = "udp"
+#
+# ## Domains or subdomains to query.
+# # domains = ["."]
+#
+# ## Query record type.
+# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
+# # record_type = "A"
+#
+# ## Dns server port.
+# # port = 53
+#
+# ## Query timeout in seconds.
+# # timeout = 2
+
+
+# # Read metrics about docker containers
+# [[inputs.docker]]
+# ## Docker Endpoint
+# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
+# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
+# endpoint = "unix:///var/run/docker.sock"
+#
+# ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
+# gather_services = false
+#
+# ## Only collect metrics for these containers, collect all if empty
+# container_names = []
+#
+# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+# source_tag = false
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# container_name_include = []
+# container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "running" state will be captured.
+# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+# # container_state_include = []
+# # container_state_exclude = []
+#
+# ## Timeout for docker list, info, and stats commands
+# timeout = "5s"
+#
+# ## Whether to report for each container per-device blkio (8:0, 8:1...) and
+# ## network (eth0, eth1, ...) stats or not
+# perdevice = true
+#
+# ## Whether to report for each container total blkio and network stats or not
+# total = false
+#
+# ## Which environment variables should we use as a tag
+# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
+#
+# ## docker labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# docker_label_include = []
+# docker_label_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read statistics from one or many dovecot servers
+# [[inputs.dovecot]]
+# ## specify dovecot servers via an address:port list
+# ## e.g.
+# ## localhost:24242
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["localhost:24242"]
+#
+# ## Type is one of "user", "domain", "ip", or "global"
+# type = "global"
+#
+# ## Wildcard matches like "*.com". An empty string "" is same as "*"
+# ## If type = "ip" filters should be
+# filters = [""]
+
+
+# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints.
+# [[inputs.ecs]]
+# ## ECS metadata url.
+# ## Metadata v2 API is used if set explicitly. Otherwise,
+# ## v3 metadata endpoint API is used if available.
+# # endpoint_url = ""
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# # container_name_include = []
+# # container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "RUNNING" state will be captured.
+# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
+# ## "RESOURCES_PROVISIONED", "STOPPED".
+# # container_status_include = []
+# # container_status_exclude = []
+#
+# ## ecs labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# ecs_label_include = [ "com.amazonaws.ecs.*" ]
+# ecs_label_exclude = []
+#
+# ## Timeout for queries.
+# # timeout = "5s"
+
+
+# # Read stats from one or more Elasticsearch servers or clusters
+# [[inputs.elasticsearch]]
+# ## specify a list of one or more Elasticsearch servers
+# # you can add username and password to your url to use basic authentication:
+# # servers = ["http://user:pass@localhost:9200"]
+# servers = ["http://localhost:9200"]
+#
+# ## Timeout for HTTP requests to the elastic search server(s)
+# http_timeout = "5s"
+#
+# ## When local is true (the default), the node will read only its own stats.
+# ## Set local to false when you want to read the node stats from all nodes
+# ## of the cluster.
+# local = true
+#
+# ## Set cluster_health to true when you want to also obtain cluster health stats
+# cluster_health = false
+#
+# ## Adjust cluster_health_level when you want to also obtain detailed health stats
+# ## The options are
+# ## - indices (default)
+# ## - cluster
+# # cluster_health_level = "indices"
+#
+# ## Set cluster_stats to true when you want to also obtain cluster stats.
+# cluster_stats = false
+#
+# ## Only gather cluster_stats from the master node. To work this require local = true
+# cluster_stats_only_from_master = true
+#
+# ## Indices to collect; can be one or more indices names or _all
+# indices_include = ["_all"]
+#
+# ## One of "shards", "cluster", "indices"
+# indices_level = "shards"
+#
+# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
+# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
+# ## "breaker". Per default, all stats are gathered.
+# # node_stats = ["jvm", "http"]
+#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Returns ethtool statistics for given interfaces
+# [[inputs.ethtool]]
+# ## List of interfaces to pull metrics for
+# # interface_include = ["eth0"]
+#
+# ## List of interfaces to ignore when pulling metrics.
+# # interface_exclude = ["eth1"]
+
+
+# # Read metrics from one or more commands that can output to stdout
+# [[inputs.exec]]
+# ## Commands array
+# commands = [
+# "/tmp/test.sh",
+# "/usr/bin/mycollector --foo=bar",
+# "/tmp/collect_*.sh"
+# ]
+#
+# ## Timeout for each command to complete.
+# timeout = "5s"
+#
+# ## measurement name suffix (for separating different commands)
+# name_suffix = "_mycollector"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read metrics from fail2ban.
+# [[inputs.fail2ban]]
+# ## Use sudo to run fail2ban-client
+# use_sudo = false
+
+
+# # Read devices value(s) from a Fibaro controller
+# [[inputs.fibaro]]
+# ## Required Fibaro controller address/hostname.
+# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
+# url = "http://:80"
+#
+# ## Required credentials to access the API (http://)
+# username = ""
+# password = ""
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+
+
+# # Parse a complete file each interval
+# [[inputs.file]]
+# ## Files to parse each interval. Accept standard unix glob matching rules,
+# ## as well as ** to match recursive files and directories.
+# files = ["/tmp/metrics.out"]
+#
+# ## Name a tag containing the name of the file the data was parsed from. Leave empty
+# ## to disable.
+# # file_tag = ""
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
+#
+# ## The dataformat to be read from files
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Count files in a directory
+# [[inputs.filecount]]
+# ## Directory to gather stats about.
+# ## deprecated in 1.9; use the directories option
+# # directory = "/var/cache/apt/archives"
+#
+# ## Directories to gather stats about.
+# ## This accept standard unit glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/** -> recursively find all directories in /var/log and count files in each directory
+# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory
+# ## /var/log -> count all files in /var/log and all of its subdirectories
+# directories = ["/var/cache/apt/archives"]
+#
+# ## Only count files that match the name pattern. Defaults to "*".
+# name = "*.deb"
+#
+# ## Count files in subdirectories. Defaults to true.
+# recursive = false
+#
+# ## Only count regular files. Defaults to true.
+# regular_only = true
+#
+# ## Follow all symlinks while walking the directory tree. Defaults to false.
+# follow_symlinks = false
+#
+# ## Only count files that are at least this size. If size is
+# ## a negative number, only count files that are smaller than the
+# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
+# ## Without quotes and units, interpreted as size in bytes.
+# size = "0B"
+#
+# ## Only count files that have not been touched for at least this
+# ## duration. If mtime is negative, only count files that have been
+# ## touched in this duration. Defaults to "0s".
+# mtime = "0s"
+
+
+# # Read stats about given file(s)
+# [[inputs.filestat]]
+# ## Files to gather stats about.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## "/var/log/**.log" -> recursively find all .log files in /var/log
+# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+# ## "/var/log/apache.log" -> just tail the apache log file
+# ##
+# ## See https://github.com/gobwas/glob for more examples
+# ##
+# files = ["/var/log/**.log"]
+#
+# ## If true, read the entire file and calculate an md5 checksum.
+# md5 = false
+
+
+# # Read real time temps from fireboard.io servers
+# [[inputs.fireboard]]
+# ## Specify auth token for your account
+# auth_token = "invalidAuthToken"
+# ## You can override the fireboard server URL if necessary
+# # url = "https://fireboard.io/api/v1/devices.json"
+# ## You can set a different http_timeout if you need to
+# ## You should set a string using a number and a time indicator,
+# ## for example "12s" for 12 seconds.
+# # http_timeout = "4s"
+
+
+# # Read metrics exposed by fluentd in_monitor plugin
+# [[inputs.fluentd]]
+# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
+# ##
+# ## Endpoint:
+# ## - only one URI is allowed
+# ## - https is not supported
+# endpoint = "http://localhost:24220/api/plugins.json"
+#
+# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
+# exclude = [
+# "monitor_agent",
+# "dummy",
+# ]
+
+
+# # Gather repository information from GitHub hosted repositories.
+# [[inputs.github]]
+# ## List of repositories to monitor.
+# repositories = [
+# "influxdata/telegraf",
+# "influxdata/influxdb"
+# ]
+#
+# ## Github API access token. Unauthenticated requests are limited to 60 per hour.
+# # access_token = ""
+#
+# ## Github API enterprise url. Github Enterprise accounts must specify their base url.
+# # enterprise_base_url = ""
+#
+# ## Timeout for HTTP requests.
+# # http_timeout = "5s"
+
+
+# # Read flattened metrics from one or more GrayLog HTTP endpoints
+# [[inputs.graylog]]
+# ## API endpoint, currently supported API:
+# ##
+# ## - multiple (Ex http://:12900/system/metrics/multiple)
+# ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace})
+# ##
+# ## For namespace endpoint, the metrics array will be ignored for that call.
+# ## Endpoint can contain namespace and multiple type calls.
+# ##
+# ## Please check http://[graylog-server-ip]:12900/api-browser for full list
+# ## of endpoints
+# servers = [
+# "http://[graylog-server-ip]:12900/system/metrics/multiple",
+# ]
+#
+# ## Metrics list
+# ## List of metrics can be found on Graylog webservice documentation.
+# ## Or by hitting the web service API at:
+# ## http://[graylog-host]:12900/system/metrics
+# metrics = [
+# "jvm.cl.loaded",
+# "jvm.memory.pools.Metaspace.committed"
+# ]
+#
+# ## Username and password
+# username = ""
+# password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics of haproxy, via socket or csv stats page
+# [[inputs.haproxy]]
+# ## An array of address to gather stats about. Specify an ip on hostname
+# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
+# ## Make sure you specify the complete path to the stats endpoint
+# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
+#
+# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
+# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## You can also use local socket with standard wildcard globbing.
+# ## Server address not starting with 'http' will be treated as a possible
+# ## socket, so both examples below are valid.
+# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+#
+# ## By default, some of the fields are renamed from what haproxy calls them.
+# ## Setting this option to true results in the plugin keeping the original
+# ## field names.
+# # keep_field_names = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+# ## By default, telegraf gathers temps data from all disks detected by the
+# ## hddtemp.
+# ##
+# ## Only collect temps from the selected disks.
+# ##
+# ## A * as the device name will return the temperature values of all disks.
+# ##
+# # address = "127.0.0.1:7634"
+# # devices = ["sda", "*"]
+
+
+# # Read formatted metrics from one or more HTTP endpoints
+# [[inputs.http]]
+# ## One or more URLs from which to read formatted metrics
+# urls = [
+# "http://localhost/metrics"
+# ]
+#
+# ## HTTP method
+# # method = "GET"
+#
+# ## Optional HTTP headers
+# # headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## HTTP entity-body to send with POST/PUT requests.
+# # body = ""
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## List of success status codes
+# # success_status_codes = [200]
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+
+
+# # HTTP/HTTPS request given an address a method and a timeout
+# [[inputs.http_response]]
+# ## Deprecated in 1.12, use 'urls'
+# ## Server address (default http://localhost)
+# # address = "http://localhost"
+#
+# ## List of urls to query.
+# # urls = ["http://localhost"]
+#
+# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set)
+# # http_proxy = "http://localhost:8888"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## HTTP Request Method
+# # method = "GET"
+#
+# ## Whether to follow redirects from the server (defaults to false)
+# # follow_redirects = false
+#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional HTTP Request Body
+# # body = '''
+# # {'fake':'data'}
+# # '''
+#
+# ## Optional name of the field that will contain the body of the response.
+# ## By default it is set to an empty String indicating that the body's content won't be added
+# # response_body_field = ''
+#
+# ## Maximum allowed HTTP response body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# ## If the response body size exceeds this limit a "body_read_error" will be raised
+# # response_body_max_size = "32MiB"
+#
+# ## Optional substring or regex match in body of the response (case sensitive)
+# # response_string_match = "\"service_status\": \"up\""
+# # response_string_match = "ok"
+# # response_string_match = "\".*_status\".?:.?\"up\""
+#
+# ## Expected response status code.
+# ## The status code of the response is compared to this value. If they match, the field
+# ## "response_status_code_match" will be 1, otherwise it will be 0. If the
+# ## expected status code is 0, the check is disabled and the field won't be added.
+# # response_status_code = 0
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## HTTP Request Headers (all values must be strings)
+# # [inputs.http_response.headers]
+# # Host = "github.com"
+#
+# ## Optional setting to map response http headers into tags
+# ## If the http header is not present on the request, no corresponding tag will be added
+# ## If multiple instances of the http header are present, only the first value will be used
+# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+#
+# ## Interface to use when dialing an address
+# # interface = "eth0"
+
+
+# # Read flattened metrics from one or more JSON HTTP endpoints
+# [[inputs.httpjson]]
+# ## NOTE This plugin only reads numerical measurements, strings and booleans
+# ## will be ignored.
+#
+# ## Name for the service being polled. Will be appended to the name of the
+# ## measurement e.g. httpjson_webserver_stats
+# ##
+# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
+# name = "webserver_stats"
+#
+# ## URL of each server in the service's cluster
+# servers = [
+# "http://localhost:9999/stats/",
+# "http://localhost:9998/stats/",
+# ]
+# ## Set response_timeout (default 5 seconds)
+# response_timeout = "5s"
+#
+# ## HTTP method to use: GET or POST (case-sensitive)
+# method = "GET"
+#
+# ## List of tag names to extract from top-level of JSON server response
+# # tag_keys = [
+# # "my_tag_1",
+# # "my_tag_2"
+# # ]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## HTTP parameters (all values must be strings). For "GET" requests, data
+# ## will be included in the query. For "POST" requests, data will be included
+# ## in the request body as "x-www-form-urlencoded".
+# # [inputs.httpjson.parameters]
+# # event_type = "cpu_spike"
+# # threshold = "0.75"
+#
+# ## HTTP Headers (all values must be strings)
+# # [inputs.httpjson.headers]
+# # X-Auth-Token = "my-xauth-token"
+# # apiVersion = "v1"
+
+
+# # Gather Icinga2 status
+# [[inputs.icinga2]]
+# ## Required Icinga2 server address
+# # server = "https://localhost:5665"
+#
+# ## Required Icinga2 object type ("services" or "hosts")
+# # object_type = "services"
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Gets counters from all InfiniBand cards and ports installed
+# [[inputs.infiniband]]
+# # no configuration
+
+
+# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.influxdb]]
+# ## Works with InfluxDB debug endpoints out of the box,
+# ## but other services can use this format too.
+# ## See the influxdb plugin's README for more details.
+#
+# ## Multiple URLs from which to read InfluxDB-formatted JSON
+# ## Default is "http://localhost:8086/debug/vars".
+# urls = [
+# "http://localhost:8086/debug/vars"
+# ]
+#
+# ## Username and password to send using HTTP Basic Authentication.
+# # username = ""
+# # password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## http request & header timeout
+# timeout = "5s"
+
+
+# # Collect statistics about itself
+# [[inputs.internal]]
+# ## If true, collect telegraf memory stats.
+# # collect_memstats = true
+
+
+# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
+# [[inputs.interrupts]]
+# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
+# ## stored as a field.
+# ##
+# ## The default is false for backwards compatibility, and will be changed to
+# ## true in a future version. It is recommended to set to true on new
+# ## deployments.
+# # cpu_as_tag = false
+#
+# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
+# # [inputs.interrupts.tagdrop]
+# # irq = [ "NET_RX", "TASKLET" ]
+
+
+# # Read metrics from the bare metal servers via IPMI
+# [[inputs.ipmi_sensor]]
+# ## optionally specify the path to the ipmitool executable
+# # path = "/usr/bin/ipmitool"
+# ##
+# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool.
+# ## Sudo must be configured to allow the telegraf user to run ipmitool
+# ## without a password.
+# # use_sudo = false
+# ##
+# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
+# # privilege = "ADMINISTRATOR"
+# ##
+# ## optionally specify one or more servers via a url matching
+# ## [username[:password]@][protocol[(address)]]
+# ## e.g.
+# ## root:passwd@lan(127.0.0.1)
+# ##
+# ## if no servers are specified, local machine sensor stats will be queried
+# ##
+# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
+# ## gaps or overlap in pulled data
+# interval = "30s"
+#
+# ## Timeout for the ipmitool command to complete
+# timeout = "20s"
+#
+# ## Schema Version: (Optional, defaults to version 1)
+# metric_version = 2
+
+
+# # Gather packets and bytes counters from Linux ipsets
+# [[inputs.ipset]]
+# ## By default, we only show sets which have already matched at least 1 packet.
+# ## set include_unmatched_sets = true to gather them all.
+# include_unmatched_sets = false
+# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
+# use_sudo = false
+# ## The default timeout of 1s for ipset execution can be overridden here:
+# # timeout = "1s"
+
+
+# # Gather packets and bytes throughput from iptables
+# [[inputs.iptables]]
+# ## iptables require root access on most systems.
+# ## Setting 'use_sudo' to true will make use of sudo to run iptables.
+# ## Users must configure sudo to allow telegraf user to run iptables with no password.
+# ## iptables can be restricted to only list command "iptables -nvL".
+# use_sudo = false
+# ## Setting 'use_lock' to true runs iptables with the "-w" option.
+# ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvL")
+# use_lock = false
+# ## Define an alternate executable, such as "ip6tables". Default is "iptables".
+# # binary = "ip6tables"
+# ## defines the table to monitor:
+# table = "filter"
+# ## defines the chains to monitor.
+# ## NOTE: iptables rules without a comment will not be monitored.
+# ## Read the plugin documentation for more information.
+# chains = [ "INPUT" ]
+
+
+# # Collect virtual and real server stats from Linux IPVS
+# [[inputs.ipvs]]
+# # no configuration
+
+
+# # Read jobs and cluster metrics from Jenkins instances
+# [[inputs.jenkins]]
+# ## The Jenkins URL in the format "schema://host:port"
+# url = "http://my-jenkins-instance:8080"
+# # username = "admin"
+# # password = "admin"
+#
+# ## Set response_timeout
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Max Job Build Age filter
+# ## Default 1 hour, ignore builds older than max_build_age
+# # max_build_age = "1h"
+#
+# ## Optional Sub Job Depth filter
+# ## Jenkins can have unlimited layer of sub jobs
+# ## This config will limit the layers of pulling, default value 0 means
+# ## unlimited pulling until no more sub jobs
+# # max_subjob_depth = 0
+#
+# ## Optional Sub Job Per Layer
+# ## In workflow-multibranch-plugin, each branch will be created as a sub job.
+# ## This config will limit the query to only the latest branches in each layer,
+# ## empty will use default value 10
+# # max_subjob_per_layer = 10
+#
+# ## Jobs to exclude from gathering
+# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
+#
+# ## Nodes to exclude from gathering
+# # node_exclude = [ "node1", "node2" ]
+#
+# ## Worker pool for jenkins plugin only
+# ## Empty this field will use default value 5
+# # max_connections = 5
+
+
+# # Read JMX metrics through Jolokia
+# [[inputs.jolokia]]
+# # DEPRECATED: the jolokia plugin has been deprecated in favor of the
+# # jolokia2 plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
+#
+# ## This is the context root used to compose the jolokia url
+# ## NOTE that Jolokia requires a trailing slash at the end of the context root
+# ## NOTE that your jolokia security policy must allow for POST requests.
+# context = "/jolokia/"
+#
+# ## This specifies the mode used
+# # mode = "proxy"
+# #
+# ## When in proxy mode this section is used to specify further
+# ## proxy address configurations.
+# ## Remember to change host address to fit your environment.
+# # [inputs.jolokia.proxy]
+# # host = "127.0.0.1"
+# # port = "8080"
+#
+# ## Optional http timeouts
+# ##
+# ## response_header_timeout, if non-zero, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request.
+# # response_header_timeout = "3s"
+# ##
+# ## client_timeout specifies a time limit for requests made by this client.
+# ## Includes connection time, any redirects, and reading the response body.
+# # client_timeout = "4s"
+#
+# ## Attribute delimiter
+# ##
+# ## When multiple attributes are returned for a single
+# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
+# ## name, and the attribute name, separated by the given delimiter.
+# # delimiter = "_"
+#
+# ## List of servers exposing jolokia read service
+# [[inputs.jolokia.servers]]
+# name = "as-server-01"
+# host = "127.0.0.1"
+# port = "8080"
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## List of metrics collected on above servers
+# ## Each metric consists in a name, a jmx path and either
+# ## a pass or drop slice attribute.
+# ## This collects all heap memory usage metrics.
+# [[inputs.jolokia.metrics]]
+# name = "heap_memory_usage"
+# mbean = "java.lang:type=Memory"
+# attribute = "HeapMemoryUsage"
+#
+# ## This collects thread count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "thread_count"
+# mbean = "java.lang:type=Threading"
+# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
+#
+# ## This collects loaded/unloaded class count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "class_count"
+# mbean = "java.lang:type=ClassLoading"
+# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
+
+
+# # Read JMX metrics from a Jolokia REST agent endpoint
+# [[inputs.jolokia2_agent]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# # Add agents URLs to query
+# urls = ["http://localhost:8080/jolokia"]
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_agent.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read JMX metrics from a Jolokia REST proxy endpoint
+# [[inputs.jolokia2_proxy]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# ## Proxy agent
+# url = "http://localhost:8080/jolokia"
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add proxy targets to query
+# # default_target_username = ""
+# # default_target_password = ""
+# [[inputs.jolokia2_proxy.target]]
+# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
+# # username = ""
+# # password = ""
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_proxy.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.kapacitor]]
+# ## Multiple URLs from which to read Kapacitor-formatted JSON
+# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
+# urls = [
+# "http://localhost:9092/kapacitor/v1/debug/vars"
+# ]
+#
+# ## Time limit for http requests
+# timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Get kernel statistics from /proc/vmstat
+# [[inputs.kernel_vmstat]]
+# # no configuration
+
+
+# # Read status information from one or more Kibana servers
+# [[inputs.kibana]]
+# ## Specify a list of one or more Kibana servers
+# servers = ["http://localhost:5601"]
+#
+# ## Timeout for HTTP requests
+# timeout = "5s"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the Kubernetes api
+# [[inputs.kube_inventory]]
+# ## URL for the Kubernetes API
+# url = "https://127.0.0.1"
+#
+# ## Namespace to use. Set to "" to use all namespaces.
+# # namespace = "default"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional Resources to exclude from gathering
+# ## Leave them blank to try to gather everything available.
+# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
+# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
+# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional Resources to include when gathering
+# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## selectors to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all selectors as tags
+# ## selector_exclude overrides selector_include if both set.
+# # selector_include = []
+# # selector_exclude = ["*"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the kubernetes kubelet api
+# [[inputs.kubernetes]]
+# ## URL for the kubelet
+# url = "http://127.0.0.1:10255"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Pod labels to be added as tags. An empty array for both include and
+# ## exclude will include all labels.
+# # label_include = []
+# # label_exclude = ["*"]
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = /path/to/cafile
+# # tls_cert = /path/to/certfile
+# # tls_key = /path/to/keyfile
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from a LeoFS Server via SNMP
+# [[inputs.leofs]]
+# ## An array of URLs of the form:
+# ## host [ ":" port]
+# servers = ["127.0.0.1:4020"]
+
+
+# # Provides Linux sysctl fs metrics
+# [[inputs.linux_sysctl_fs]]
+# # no configuration
+
+
+# # Read metrics exposed by Logstash
+# [[inputs.logstash]]
+# ## The URL of the exposed Logstash API endpoint.
+# url = "http://127.0.0.1:9600"
+#
+# ## Use Logstash 5 single pipeline API, set to true when monitoring
+# ## Logstash 5.
+# # single_pipeline = false
+#
+# ## Enable optional collection components. Can contain
+# ## "pipelines", "process", and "jvm".
+# # collect = ["pipelines", "process", "jvm"]
+#
+# ## Timeout for HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional HTTP Basic Auth credentials.
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Use TLS but skip chain & host verification.
+# # insecure_skip_verify = false
+#
+# ## Optional HTTP headers.
+# # [inputs.logstash.headers]
+# # "X-Special-Header" = "Special-Value"
+
+
+# # Read metrics from local Lustre service on OST, MDS
+# [[inputs.lustre2]]
+# ## An array of /proc globs to search for Lustre stats
+# ## If not specified, the default will work on Lustre 2.5.x
+# ##
+# # ost_procfiles = [
+# # "/proc/fs/lustre/obdfilter/*/stats",
+# # "/proc/fs/lustre/osd-ldiskfs/*/stats",
+# # "/proc/fs/lustre/obdfilter/*/job_stats",
+# # ]
+# # mds_procfiles = [
+# # "/proc/fs/lustre/mdt/*/md_stats",
+# # "/proc/fs/lustre/mdt/*/job_stats",
+# # ]
+
+
+# # Gathers metrics from the /3.0/reports MailChimp API
+# [[inputs.mailchimp]]
+# ## MailChimp API key
+# ## get from https://admin.mailchimp.com/account/api/
+# api_key = "" # required
+# ## Reports for campaigns sent more than days_old ago will not be collected.
+# ## 0 means collect all.
+# days_old = 0
+# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
+# # campaign_id = ""
+
+
+# # Retrieves information on a specific host in a MarkLogic Cluster
+# [[inputs.marklogic]]
+# ## Base URL of the MarkLogic HTTP Server.
+# url = "http://localhost:8002"
+#
+# ## List of specific hostnames to retrieve information. At least (1) required.
+# # hosts = ["hostname1", "hostname2"]
+#
+# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from one or many mcrouter servers
+# [[inputs.mcrouter]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
+# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
+#
+# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
+# # timeout = "5s"
+
+
+# # Read metrics from one or many memcached servers
+# [[inputs.memcached]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port. ie localhost, 10.0.0.1:11211, etc.
+# servers = ["localhost:11211"]
+# # unix_sockets = ["/var/run/memcached.sock"]
+
+
+# # Telegraf plugin for gathering metrics from N Mesos masters
+# [[inputs.mesos]]
+# ## Timeout, in ms.
+# timeout = 100
+#
+# ## A list of Mesos masters.
+# masters = ["http://localhost:5050"]
+#
+# ## Master metrics groups to be collected, by default, all enabled.
+# master_collections = [
+# "resources",
+# "master",
+# "system",
+# "agents",
+# "frameworks",
+# "framework_offers",
+# "tasks",
+# "messages",
+# "evqueue",
+# "registrar",
+# "allocator",
+# ]
+#
+# ## A list of Mesos slaves, default is []
+# # slaves = []
+#
+# ## Slave metrics groups to be collected, by default, all enabled.
+# # slave_collections = [
+# # "resources",
+# # "agent",
+# # "system",
+# # "executors",
+# # "tasks",
+# # "messages",
+# # ]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Collects scores from a Minecraft server's scoreboard using the RCON protocol
+# [[inputs.minecraft]]
+# ## Address of the Minecraft server.
+# # server = "localhost"
+#
+# ## Server RCON Port.
+# # port = "25575"
+#
+# ## Server RCON Password.
+# password = ""
+#
+# ## Uncomment to remove deprecated metric components.
+# # tagdrop = ["server"]
+
+
+# # Retrieve data from MODBUS slave devices
+# [[inputs.modbus]]
+# ## Connection Configuration
+# ##
+# ## The plugin supports connections to PLCs via MODBUS/TCP or
+# ## via serial line communication in binary (RTU) or readable (ASCII) encoding
+# ##
+# ## Device name
+# name = "Device"
+#
+# ## Slave ID - addresses a MODBUS device on the bus
+# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved]
+# slave_id = 1
+#
+# ## Timeout for each request
+# timeout = "1s"
+#
+# ## Maximum number of retries and the time to wait between retries
+# ## when a slave-device is busy.
+# # busy_retries = 0
+# # busy_retries_wait = "100ms"
+#
+# # TCP - connect via Modbus/TCP
+# controller = "tcp://localhost:502"
+#
+# ## Serial (RS485; RS232)
+# # controller = "file:///dev/ttyUSB0"
+# # baud_rate = 9600
+# # data_bits = 8
+# # parity = "N"
+# # stop_bits = 1
+# # transmission_mode = "RTU"
+#
+#
+# ## Measurements
+# ##
+#
+# ## Digital Variables, Discrete Inputs and Coils
+# ## measurement - the (optional) measurement name, defaults to "modbus"
+# ## name - the variable name
+# ## address - variable address
+#
+# discrete_inputs = [
+# { name = "start", address = [0]},
+# { name = "stop", address = [1]},
+# { name = "reset", address = [2]},
+# { name = "emergency_stop", address = [3]},
+# ]
+# coils = [
+# { name = "motor1_run", address = [0]},
+# { name = "motor1_jog", address = [1]},
+# { name = "motor1_stop", address = [2]},
+# ]
+#
+# ## Analog Variables, Input Registers and Holding Registers
+# ## measurement - the (optional) measurement name, defaults to "modbus"
+# ## name - the variable name
+# ## byte_order - the ordering of bytes
+# ## |---AB, ABCD - Big Endian
+# ## |---BA, DCBA - Little Endian
+# ## |---BADC - Mid-Big Endian
+# ## |---CDAB - Mid-Little Endian
+# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation)
+# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input)
+# ## scale - the final numeric variable representation
+# ## address - variable address
+#
+# holding_registers = [
+# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]},
+# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]},
+# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]},
+# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]},
+# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]},
+# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]},
+# ]
+# input_registers = [
+# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]},
+# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
+# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
+# ]
+
+
+# # Read metrics from one or many MongoDB servers
+# [[inputs.mongodb]]
+# ## An array of URLs of the form:
+# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
+# ## For example:
+# ## mongodb://user:auth_key@10.10.3.30:27017,
+# ## mongodb://10.10.3.33:18832,
+# servers = ["mongodb://127.0.0.1:27017"]
+#
+# ## When true, collect cluster status
+# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
+# ## may have an impact on performance.
+# # gather_cluster_status = true
+#
+# ## When true, collect per database stats
+# # gather_perdb_stats = false
+#
+# ## When true, collect per collection stats
+# # gather_col_stats = false
+#
+# ## List of db where collections stats are collected
+# ## If empty, all db are concerned
+# # col_stats_dbs = ["local"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics and status information about processes managed by Monit
+# [[inputs.monit]]
+# ## Monit HTTPD address
+# address = "http://127.0.0.1:2812"
+#
+# ## Username and Password for Monit
+# # username = ""
+# # password = ""
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Aggregates the contents of multiple files into a single point
+# [[inputs.multifile]]
+# ## Base directory where telegraf will look for files.
+# ## Omit this option to use absolute paths.
+# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
+#
+# ## If true, Telegraf discards all data when a single file can't be read.
+# ## Else, Telegraf omits the field generated from this file.
+# # fail_early = true
+#
+# ## Files to parse each interval.
+# [[inputs.multifile.file]]
+# file = "in_pressure_input"
+# dest = "pressure"
+# conversion = "float"
+# [[inputs.multifile.file]]
+# file = "in_temp_input"
+# dest = "temperature"
+# conversion = "float(3)"
+# [[inputs.multifile.file]]
+# file = "in_humidityrelative_input"
+# dest = "humidityrelative"
+# conversion = "float(3)"
+
+
+# # Read metrics from one or many mysql servers
+#[[inputs.mysql]]
+# ## specify servers via a url matching:
+# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
+# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
+# ## e.g.
+# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
+# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
+# #
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["admin:JambonzR0ck$@tcp(aurora-cluster-jambonz.cluster-c9hzpr8ulflh.us-west-1.rds.amazonaws.com:3306)/"]
+#
+# ## Selects the metric output format.
+# ##
+# ## This option exists to maintain backwards compatibility, if you have
+# ## existing metrics do not set or change this value until you are ready to
+# ## migrate to the new format.
+# ##
+# ## If you do not have existing metrics from this plugin set to the latest
+# ## version.
+# ##
+# ## Telegraf >=1.6: metric_version = 2
+# ## <1.6: metric_version = 1 (or unset)
+# metric_version = 2
+#
+# ## if the list is empty, then metrics are gathered from all database tables
+# table_schema_databases = ["jambones"]
+#
+# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list
+# # gather_table_schema = false
+#
+# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
+# # gather_process_list = false
+#
+# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
+# # gather_user_statistics = false
+#
+# ## gather auto_increment columns and max values from information schema
+# # gather_info_schema_auto_inc = false
+#
+# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
+# # gather_innodb_metrics = false
+#
+# ## gather metrics from SHOW SLAVE STATUS command output
+# # gather_slave_status = false
+#
+# ## gather metrics from SHOW BINARY LOGS command output
+# # gather_binary_logs = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES
+# # gather_global_variables = true
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
+# # gather_table_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
+# # gather_table_lock_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
+# # gather_index_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
+# # gather_event_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
+# # gather_file_events_stats = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
+# # gather_perf_events_statements = false
+#
+# ## the limits for metrics form perf_events_statements
+# # perf_events_statements_digest_text_limit = 120
+# # perf_events_statements_limit = 250
+# # perf_events_statements_time_limit = 86400
+#
+# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
+# ## example: interval_slow = "30m"
+# # interval_slow = ""
+#
+# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Provides metrics about the state of a NATS server
+# [[inputs.nats]]
+# ## The address of the monitoring endpoint of the NATS server
+# server = "http://localhost:8222"
+#
+# ## Maximum time to receive response
+# # response_timeout = "5s"
+
+
+# # Neptune Apex data collector
+# [[inputs.neptune_apex]]
+# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
+# ## Measurements will be logged under "apex".
+#
+# ## The base URL of the local Apex(es). If you specify more than one server, they will
+# ## be differentiated by the "source" tag.
+# servers = [
+# "http://apex.local",
+# ]
+#
+# ## The response_timeout specifies how long to wait for a reply from the Apex.
+# #response_timeout = "5s"
+
+
+# # Read metrics about network interface usage
+[[inputs.net]]
+# ## By default, telegraf gathers stats from any up interface (excluding loopback)
+# ## Setting interfaces will tell it to gather these explicit interfaces,
+# ## regardless of status.
+# ##
+# # interfaces = ["eth0"]
+# ##
+# ## On linux systems telegraf also collects protocol stats.
+# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
+# ##
+# # ignore_protocol_stats = false
+# ##
+
+
+# # Collect response time of a TCP or UDP connection
+# [[inputs.net_response]]
+# ## Protocol, must be "tcp" or "udp"
+# ## NOTE: because the "udp" protocol does not respond to requests, it requires
+# ## a send/expect string pair (see below).
+# protocol = "tcp"
+# ## Server address (default localhost)
+# address = "localhost:80"
+#
+# ## Set timeout
+# # timeout = "1s"
+#
+# ## Set read timeout (only used if expecting a response)
+# # read_timeout = "1s"
+#
+# ## The following options are required for UDP checks. For TCP, they are
+# ## optional. The plugin will send the given string to the server and then
+# ## expect to receive the given 'expect' string back.
+# ## string sent to the server
+# # send = "ssh"
+# ## expected string in answer
+# # expect = "ssh"
+#
+# ## Uncomment to remove deprecated fields
+# # fielddrop = ["result_type", "string_found"]
+
+
+# # Read TCP metrics such as established, time wait and sockets counts.
+[[inputs.netstat]]
+# # no configuration
+
+
+# # Read Nginx's basic status information (ngx_http_stub_status_module)
+# [[inputs.nginx]]
+# # An array of Nginx stub_status URI to gather stats.
+# urls = ["http://localhost/server_status"]
+#
+# ## Optional TLS Config
+# tls_ca = "/etc/telegraf/ca.pem"
+# tls_cert = "/etc/telegraf/cert.cer"
+# tls_key = "/etc/telegraf/key.key"
+# ## Use TLS but skip chain & host verification
+# insecure_skip_verify = false
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+
+
+# # Read Nginx Plus' full status information (ngx_http_status_module)
+# [[inputs.nginx_plus]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx Plus Api documentation
+# [[inputs.nginx_plus_api]]
+# ## An array of API URI to gather stats.
+# urls = ["http://localhost/api"]
+#
+# # Nginx API version, default: 3
+# # api_version = 3
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx virtual host traffic status module information (nginx-module-sts)
+# [[inputs.nginx_sts]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# ## HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)
+# [[inputs.nginx_upstream_check]]
+# ## An URL where Nginx Upstream check module is enabled
+# ## It should be set to return a JSON formatted response
+# url = "http://127.0.0.1/status?format=json"
+#
+# ## HTTP method
+# # method = "GET"
+#
+# ## Optional HTTP headers
+# # headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## Override HTTP "Host" header
+# # host_header = "check.example.com"
+#
+# ## Timeout for HTTP requests
+# timeout = "5s"
+#
+# ## Optional HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx virtual host traffic status module information (nginx-module-vts)
+# [[inputs.nginx_vts]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# ## HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # A plugin to collect stats from the NSD authoritative DNS name server
+# [[inputs.nsd]]
+# ## Address of server to connect to, optionally ':port'. Defaults to the
+# ## address in the nsd config file.
+# server = "127.0.0.1:8953"
+#
+# ## If running as a restricted user you can prepend sudo for additional access:
+# # use_sudo = false
+#
+# ## The default location of the nsd-control binary can be overridden with:
+# # binary = "/usr/sbin/nsd-control"
+#
+# ## The default location of the nsd config file can be overridden with:
+# # config_file = "/etc/nsd/nsd.conf"
+#
+# ## The default timeout of 1s can be overridden with:
+# # timeout = "1s"
+
+
+# # Read NSQ topic and channel statistics.
+# [[inputs.nsq]]
+# ## An array of NSQD HTTP API endpoints
+# endpoints = ["http://localhost:4151"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Collect kernel snmp counters and network interface statistics
+# [[inputs.nstat]]
+# ## file paths for proc files. If empty default paths will be used:
+# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
+# ## These can also be overridden with env variables, see README.
+# proc_net_netstat = "/proc/net/netstat"
+# proc_net_snmp = "/proc/net/snmp"
+# proc_net_snmp6 = "/proc/net/snmp6"
+# ## dump metrics with 0 values too
+# dump_zeros = true
+
+
+# # Get standard NTP query metrics, requires ntpq executable.
+# [[inputs.ntpq]]
+# ## If false, set the -n ntpq flag. Can reduce metric gather time.
+# dns_lookup = true
+
+
+# # Pulls statistics from nvidia GPUs attached to the host
+# [[inputs.nvidia_smi]]
+# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
+# # bin_path = "/usr/bin/nvidia-smi"
+#
+# ## Optional: timeout for GPU polling
+# # timeout = "5s"
+
+
+# # Retrieve data from OPCUA devices
+# [[inputs.opcua]]
+# [[inputs.opcua]]
+# ## Device name
+# # name = "localhost"
+# #
+# ## OPC UA Endpoint URL
+# # endpoint = "opc.tcp://localhost:4840"
+# #
+# ## Maximum time allowed to establish a connect to the endpoint.
+# # connect_timeout = "10s"
+# #
+# ## Maximum time allowed for a request over the established connection.
+# # request_timeout = "5s"
+# #
+# ## Security policy, one of "None", "Basic128Rsa15", "Basic256",
+# ## "Basic256Sha256", or "auto"
+# # security_policy = "auto"
+# #
+# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto"
+# # security_mode = "auto"
+# #
+# ## Path to cert.pem. Required when security mode or policy isn't "None".
+# ## If cert path is not supplied, self-signed cert and key will be generated.
+# # certificate = "/etc/telegraf/cert.pem"
+# #
+# ## Path to private key.pem. Required when security mode or policy isn't "None".
+# ## If key path is not supplied, self-signed cert and key will be generated.
+# # private_key = "/etc/telegraf/key.pem"
+# #
+# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To
+# ## authenticate using a specific ID, select 'Certificate' or 'UserName'
+# # auth_method = "Anonymous"
+# #
+# ## Username. Required for auth_method = "UserName"
+# # username = ""
+# #
+# ## Password. Required for auth_method = "UserName"
+# # password = ""
+# #
+# ## Node ID configuration
+# ## name - the variable name
+# ## namespace - integer value 0 thru 3
+# ## identifier_type - s=string, i=numeric, g=guid, b=opaque
+# ## identifier - tag as shown in opcua browser
+# ## data_type - boolean, byte, short, int, uint, uint16, int16,
+# ## uint32, int32, float, double, string, datetime, number
+# ## Example:
+# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"}
+# nodes = [
+# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
+# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
+# ]
+
+
+# # OpenLDAP cn=Monitor plugin
+# [[inputs.openldap]]
+# host = "localhost"
+# port = 389
+#
+# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
+# # note that port will likely need to be changed to 636 for ldaps
+# # valid options: "" | "starttls" | "ldaps"
+# tls = ""
+#
+# # skip peer certificate verification. Default is false.
+# insecure_skip_verify = false
+#
+# # Path to PEM-encoded Root certificate to use to verify server certificate
+# tls_ca = "/etc/ssl/certs.pem"
+#
+# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
+# bind_dn = ""
+# bind_password = ""
+#
+# # Reverse metric names so they sort more naturally. Recommended.
+# # This defaults to false if unset, but is set to true when generating a new config
+# reverse_metric_names = true
+
+
+# # Get standard NTP query metrics from OpenNTPD.
+# [[inputs.openntpd]]
+# ## Run ntpctl binary with sudo.
+# # use_sudo = false
+#
+# ## Location of the ntpctl binary.
+# # binary = "/usr/sbin/ntpctl"
+#
+# ## Maximum time the ntpctl binary is allowed to run.
+# # timeout = "5ms"
+
+
+# # A plugin to collect stats from OpenSMTPD - a FREE implementation of the server-side SMTP protocol
+# [[inputs.opensmtpd]]
+# ## If running as a restricted user you can prepend sudo for additional access:
+# #use_sudo = false
+#
+# ## The default location of the smtpctl binary can be overridden with:
+# binary = "/usr/sbin/smtpctl"
+#
+# ## The default timeout of 1000ms can be overridden with (in milliseconds):
+# timeout = 1000
+
+
+# # Read current weather and forecasts data from openweathermap.org
+# [[inputs.openweathermap]]
+# ## OpenWeatherMap API key.
+# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+#
+# ## City ID's to collect weather data from.
+# city_id = ["5391959"]
+#
+# ## Language of the description field. Can be one of "ar", "bg",
+# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
+# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
+# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
+# # lang = "en"
+#
+# ## APIs to fetch; can contain "weather" or "forecast".
+# fetch = ["weather", "forecast"]
+#
+# ## OpenWeatherMap base URL
+# # base_url = "https://api.openweathermap.org/"
+#
+# ## Timeout for HTTP response.
+# # response_timeout = "5s"
+#
+# ## Preferred unit system for temperature and wind speed. Can be one of
+# ## "metric", "imperial", or "standard".
+# # units = "metric"
+#
+# ## Query interval; OpenWeatherMap updates their weather data every 10
+# ## minutes.
+# interval = "10m"
+
+
+# # Read metrics of passenger using passenger-status
+# [[inputs.passenger]]
+# ## Path of passenger-status.
+# ##
+# ## Plugin gather metric via parsing XML output of passenger-status
+# ## More information about the tool:
+# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
+# ##
+# ## If no path is specified, then the plugin simply execute passenger-status
+# ## hopefully it can be found in your PATH
+# command = "passenger-status -v --show=xml"
+
+
+# # Gather counters from PF
+# [[inputs.pf]]
+# ## PF require root access on most systems.
+# ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
+# ## Users must configure sudo to allow telegraf user to run pfctl with no password.
+# ## pfctl can be restricted to only list command "pfctl -s info".
+# use_sudo = false
+
+
+# # Read metrics of phpfpm, via HTTP status page or socket
+# [[inputs.phpfpm]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port and path
+# ##
+# ## Plugin can be configured in three modes (either can be used):
+# ## - http: the URL must start with http:// or https://, ie:
+# ## "http://localhost/status"
+# ## "http://192.168.130.1/status?full"
+# ##
+# ## - unixsocket: path to fpm socket, ie:
+# ## "/var/run/php5-fpm.sock"
+# ## or using a custom fpm status path:
+# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
+# ##
+# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
+# ## "fcgi://10.0.0.12:9000/status"
+# ## "cgi://10.0.10.12:9001/status"
+# ##
+# ## Example of multiple gathering from local socket and remote host
+# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
+# urls = ["http://localhost/status"]
+#
+# ## Duration allowed to complete HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Ping given url(s) and return statistics
+# [[inputs.ping]]
+# ## Hosts to send ping packets to.
+# urls = ["example.org"]
+#
+# ## Method used for sending pings, can be either "exec" or "native". When set
+# ## to "exec" the systems ping command will be executed. When set to "native"
+# ## the plugin will send pings directly.
+# ##
+# ## While the default is "exec" for backwards compatibility, new deployments
+# ## are encouraged to use the "native" method for improved compatibility and
+# ## performance.
+# # method = "exec"
+#
+# ## Number of ping packets to send per interval. Corresponds to the "-c"
+# ## option of the ping command.
+# # count = 1
+#
+# ## Time to wait between sending ping packets in seconds. Operates like the
+# ## "-i" option of the ping command.
+# # ping_interval = 1.0
+#
+# ## If set, the time to wait for a ping response in seconds. Operates like
+# ## the "-W" option of the ping command.
+# # timeout = 1.0
+#
+# ## If set, the total ping deadline, in seconds. Operates like the -w option
+# ## of the ping command.
+# # deadline = 10
+#
+# ## Interface or source address to send ping from. Operates like the -I or -S
+# ## option of the ping command.
+# # interface = ""
+#
+# ## Specify the ping executable binary.
+# # binary = "ping"
+#
+# ## Arguments for ping command. When arguments is not empty, the command from
+# ## the binary option will be used and other options (ping_interval, timeout,
+# ## etc) will be ignored.
+# # arguments = ["-c", "3"]
+#
+# ## Use only IPv6 addresses when resolving a hostname.
+# # ipv6 = false
+
+
+# # Measure postfix queue statistics
+# [[inputs.postfix]]
+# ## Postfix queue directory. If not provided, telegraf will try to use
+# ## 'postconf -h queue_directory' to determine it.
+# # queue_directory = "/var/spool/postfix"
+
+
+# # Read metrics from one or many PowerDNS servers
+# [[inputs.powerdns]]
+# ## An array of sockets to gather stats about.
+# ## Specify a path to unix socket.
+# unix_sockets = ["/var/run/pdns.controlsocket"]
+
+
+# # Read metrics from one or many PowerDNS Recursor servers
+# [[inputs.powerdns_recursor]]
+# ## Path to the Recursor control socket.
+# unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
+#
+# ## Directory to create receive socket. This default is likely not writable,
+# ## please reference the full plugin documentation for a recommended setup.
+# # socket_dir = "/var/run/"
+# ## Socket permissions for the receive socket.
+# # socket_mode = "0666"
+
+
+# # Monitor process cpu and memory usage of the core jambonz services (freeswitch, rtpengine, drachtio, node)
+[[inputs.procstat]]
+ exe = "freeswitch"
+
+[[inputs.procstat]]
+ exe = "rtpengine"
+
+[[inputs.procstat]]
+ exe = "drachtio"
+
+[[inputs.procstat]]
+ exe = "node"
+
+# ## PID file to monitor process
+# pid_file = "/var/run/nginx.pid"
+# ## executable name (ie, pgrep )
+# # exe = "nginx"
+# ## pattern as argument for pgrep (ie, pgrep -f )
+# # pattern = "nginx"
+# ## user as argument for pgrep (ie, pgrep -u )
+# # user = "nginx"
+# ## Systemd unit name
+# # systemd_unit = "nginx.service"
+# ## CGroup name or path
+# # cgroup = "systemd/system.slice/nginx.service"
+#
+# ## Windows service name
+# # win_service = ""
+#
+# ## override for process_name
+# ## This is optional; default is sourced from /proc//status
+# # process_name = "bar"
+#
+# ## Field name prefix
+# # prefix = ""
+#
+# ## When true add the full cmdline as a tag.
+# # cmdline_tag = false
+#
+# ## Add the PID as a tag instead of as a field. When collecting multiple
+# ## processes with otherwise matching tags this setting should be enabled to
+# ## ensure each process has a unique identity.
+# ##
+# ## Enabling this option may result in a large number of series, especially
+# ## when processes have a short lifetime.
+# # pid_tag = false
+#
+# ## Method to use when finding process IDs. Can be one of 'pgrep', or
+# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
+# ## the native finder performs the search directly in a manner dependent on the
+# ## platform. Default is 'pgrep'
+# # pid_finder = "pgrep"
+
+
+# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2).
+# [[inputs.proxmox]]
+# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /.
+# base_url = "https://localhost:8006/api2/json"
+# api_token = "USER@REALM!TOKENID=UUID"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# insecure_skip_verify = false
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+
+
+# # Reads last_run_summary.yaml file and converts to measurements
+# [[inputs.puppetagent]]
+# ## Location of puppet last run summary file
+# location = "/var/lib/puppet/state/last_run_summary.yaml"
+
+
+# # Reads metrics from RabbitMQ servers via the Management Plugin
+# [[inputs.rabbitmq]]
+# ## Management Plugin url. (default: http://localhost:15672)
+# # url = "http://localhost:15672"
+# ## Tag added to rabbitmq_overview series; deprecated: use tags
+# # name = "rmq-server-1"
+# ## Credentials
+# # username = "guest"
+# # password = "guest"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional request timeouts
+# ##
+# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request.
+# # header_timeout = "3s"
+# ##
+# ## client_timeout specifies a time limit for requests made by this client.
+# ## Includes connection time, any redirects, and reading the response body.
+# # client_timeout = "4s"
+#
+# ## A list of nodes to gather as the rabbitmq_node measurement. If not
+# ## specified, metrics for all nodes are gathered.
+# # nodes = ["rabbit@node1", "rabbit@node2"]
+#
+# ## A list of queues to gather as the rabbitmq_queue measurement. If not
+# ## specified, metrics for all queues are gathered.
+# # queues = ["telegraf"]
+#
+# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
+# ## specified, metrics for all exchanges are gathered.
+# # exchanges = ["telegraf"]
+#
+# ## Queues to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all queues
+# queue_name_include = []
+# queue_name_exclude = []
+#
+# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement.
+# ## If neither are specified, metrics for all federation upstreams are gathered.
+# ## Federation link metrics will only be gathered for queues and exchanges
+# ## whose non-federation metrics will be collected (e.g a queue excluded
+# ## by the 'queue_name_exclude' option will also be excluded from federation).
+# ## Globs accepted.
+# # federation_upstream_include = ["dataCentre-*"]
+# # federation_upstream_exclude = []
+
+
+# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
+# [[inputs.raindrops]]
+# ## An array of raindrops middleware URI to gather stats.
+# urls = ["http://localhost:8080/_raindrops"]
+
+
+# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required).
+# [[inputs.ras]]
+# ## Optional path to RASDaemon sqlite3 database.
+# ## Default: /var/lib/rasdaemon/ras-mc_event.db
+# # db_path = ""
+
+
+# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs
+# [[inputs.redfish]]
+# ## Server url
+# address = "https://127.0.0.1:5000"
+#
+# ## Username, Password for hardware server
+# username = "root"
+# password = "password123456"
+#
+# ## ComputerSystemId
+# computer_system_id="2M220100SL"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from one or many redis servers
+#[[inputs.redis]]
+# servers = ["tcp://jambonz.lpypq4.0001.usw1.cache.amazonaws.com:6379"]
+# ## specify servers via a url matching:
+# ## [protocol://][:password]@address[:port]
+# ## e.g.
+# ## tcp://localhost:6379
+# ## tcp://:password@192.168.99.100
+# ## unix:///var/run/redis.sock
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no port is specified, 6379 is used
+# servers = ["tcp://localhost:6379"]
+#
+# ## Optional. Specify redis commands to retrieve values
+# # [[inputs.redis.commands]]
+# # command = ["get", "sample-key"]
+# # field = "sample-key-value"
+# # type = "string"
+#
+# ## specify server password
+# # password = "s#cr@t%"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Read metrics from one or many RethinkDB servers
+# [[inputs.rethinkdb]]
+# ## An array of URI to gather stats about. Specify an ip or hostname
+# ## with optional port add password. ie,
+# ## rethinkdb://user:auth_key@10.10.3.30:28105,
+# ## rethinkdb://10.10.3.33:18832,
+# ## 10.0.0.1:10000, etc.
+# servers = ["127.0.0.1:28015"]
+# ##
+# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization,
+# ## protocol have to be named "rethinkdb2" - it will use 1_0 H.
+# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
+# ##
+# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol
+# ## have to be named "rethinkdb".
+# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
+
+
+# # Read metrics one or many Riak servers
+# [[inputs.riak]]
+# # Specify a list of one or more riak http servers
+# servers = ["http://localhost:8098"]
+
+
+# # Read API usage and limits for a Salesforce organisation
+# [[inputs.salesforce]]
+# ## specify your credentials
+# ##
+# username = "your_username"
+# password = "your_password"
+# ##
+# ## (optional) security token
+# # security_token = "your_security_token"
+# ##
+# ## (optional) environment type (sandbox or production)
+# ## default is: production
+# ##
+# # environment = "production"
+# ##
+# ## (optional) API version (default: "39.0")
+# ##
+# # version = "39.0"
+
+
+# # Monitor sensors, requires lm-sensors package
+# [[inputs.sensors]]
+# ## Remove numbers from field names.
+# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
+# # remove_numbers = true
+#
+# ## Timeout is the maximum amount of time that the sensors command can run.
+# # timeout = "5s"
+
+
+# # Read metrics from storage devices supporting S.M.A.R.T.
+# [[inputs.smart]]
+# ## Optionally specify the path to the smartctl executable
+# # path_smartctl = "/usr/bin/smartctl"
+#
+# ## Optionally specify the path to the nvme-cli executable
+# # path_nvme = "/usr/bin/nvme"
+#
+# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case
+# ## ["auto-on"] - automatically find and enable additional vendor specific disk info
+# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info
+# # enable_extensions = ["auto-on"]
+#
+# ## On most platforms used cli utilities requires root access.
+# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli.
+# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli
+# ## without a password.
+# # use_sudo = false
+#
+# ## Skip checking disks in this power mode. Defaults to
+# ## "standby" to not wake up disks that have stopped rotating.
+# ## See --nocheck in the man pages for smartctl.
+# ## smartctl version 5.41 and 5.42 have faulty detection of
+# ## power mode and might require changing this value to
+# ## "never" depending on your disks.
+# # nocheck = "standby"
+#
+# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
+# ## information from each drive into the 'smart_attribute' measurement.
+# # attributes = false
+#
+# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed.
+# # excludes = [ "/dev/pass6" ]
+#
+# ## Optionally specify devices and device type, if unset
+# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done
+# ## and all found will be included except for the excluded in excludes.
+# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"]
+#
+# ## Timeout for the cli command to complete.
+# # timeout = "30s"
+
+
+# # Retrieves SNMP values from remote agents
+# [[inputs.snmp]]
+# ## Agent addresses to retrieve values from.
+# ## example: agents = ["udp://127.0.0.1:161"]
+# ## agents = ["tcp://127.0.0.1:161"]
+# agents = ["udp://127.0.0.1:161"]
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## Agent host tag; the tag used to reference the source host
+# # agent_host_tag = "agent_host"
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## Add fields and tables defining the variables you wish to collect. This
+# ## example collects the system uptime and interface variables. Reference the
+# ## full plugin documentation for configuration details.
+
+
+# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
+# [[inputs.snmp_legacy]]
+# ## Use 'oids.txt' file to translate oids to names
+# ## To generate 'oids.txt' you need to run:
+# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
+# ## Or if you have an other MIB folder with custom MIBs
+# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
+# snmptranslate_file = "/tmp/oids.txt"
+# [[inputs.snmp.host]]
+# address = "192.168.2.2:161"
+# # SNMP community
+# community = "public" # default public
+# # SNMP version (1, 2 or 3)
+# # Version 3 not supported yet
+# version = 2 # default 2
+# # SNMP response timeout
+# timeout = 2.0 # default 2.0
+# # SNMP request retries
+# retries = 2 # default 2
+# # Which get/bulk do you want to collect for this host
+# collect = ["mybulk", "sysservices", "sysdescr"]
+# # Simple list of OIDs to get, in addition to "collect"
+# get_oids = []
+#
+# [[inputs.snmp.host]]
+# address = "192.168.2.3:161"
+# community = "public"
+# version = 2
+# timeout = 2.0
+# retries = 2
+# collect = ["mybulk"]
+# get_oids = [
+# "ifNumber",
+# ".1.3.6.1.2.1.1.3.0",
+# ]
+#
+# [[inputs.snmp.get]]
+# name = "ifnumber"
+# oid = "ifNumber"
+#
+# [[inputs.snmp.get]]
+# name = "interface_speed"
+# oid = "ifSpeed"
+# instance = "0"
+#
+# [[inputs.snmp.get]]
+# name = "sysuptime"
+# oid = ".1.3.6.1.2.1.1.3.0"
+# unit = "second"
+#
+# [[inputs.snmp.bulk]]
+# name = "mybulk"
+# max_repetition = 127
+# oid = ".1.3.6.1.2.1.1"
+#
+# [[inputs.snmp.bulk]]
+# name = "ifoutoctets"
+# max_repetition = 127
+# oid = "ifOutOctets"
+#
+# [[inputs.snmp.host]]
+# address = "192.168.2.13:161"
+# #address = "127.0.0.1:161"
+# community = "public"
+# version = 2
+# timeout = 2.0
+# retries = 2
+# #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
+# collect = ["sysuptime" ]
+# [[inputs.snmp.host.table]]
+# name = "iftable3"
+# include_instances = ["enp5s0", "eth1"]
+#
+# # SNMP TABLEs
+# # table without mapping neither subtables
+# [[inputs.snmp.table]]
+# name = "iftable1"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+#
+# # table without mapping but with subtables
+# [[inputs.snmp.table]]
+# name = "iftable2"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
+#
+# # table with mapping but without subtables
+# [[inputs.snmp.table]]
+# name = "iftable3"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# # if empty. get all instances
+# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+# # if empty, get all subtables
+#
+# # table with both mapping and subtables
+# [[inputs.snmp.table]]
+# name = "iftable4"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# # if empty get all instances
+# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+# # if empty get all subtables
+# # sub_tables could be not "real subtables"
+# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
+
+
+# # Read stats from one or more Solr servers or cores
+# [[inputs.solr]]
+# ## specify a list of one or more Solr servers
+# servers = ["http://localhost:8983"]
+#
+# ## specify a list of one or more Solr cores (default - all)
+# # cores = ["main"]
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+
+
+# # Read metrics from Microsoft SQL Server
+# [[inputs.sqlserver]]
+# ## Specify instances to monitor with a list of connection strings.
+# ## All connection parameters are optional.
+# ## By default, the host is localhost, listening on default port, TCP 1433.
+# ## for Windows, the user is the currently running AD user (SSO).
+# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
+# ## parameters, in particular, tls connections can be created like so:
+# ## "encrypt=true;certificate=;hostNameInCertificate="
+# # servers = [
+# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
+# # ]
+#
+# ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
+# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
+# ## Possible values for database_type are
+# ## "AzureSQLDB"
+# ## "SQLServer"
+# ## "AzureSQLManagedInstance"
+# # database_type = "AzureSQLDB"
+#
+#
+# ## Optional parameter, setting this to 2 will use a new version
+# ## of the collection queries that break compatibility with the original
+# ## dashboards.
+# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
+# query_version = 2
+#
+# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
+# # azuredb = false
+#
+# ## Possible queries
+# ## Version 2:
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - DatabaseIO
+# ## - ServerProperties
+# ## - MemoryClerk
+# ## - Schedulers
+# ## - SqlRequests
+# ## - VolumeSpace
+# ## - Cpu
+#
+# ## Version 1:
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - CPUHistory
+# ## - DatabaseIO
+# ## - DatabaseSize
+# ## - DatabaseStats
+# ## - DatabaseProperties
+# ## - MemoryClerk
+# ## - VolumeSpace
+# ## - PerformanceMetrics
+#
+#
+# ## Queries enabled by default for specific Database Type
+# ## database_type = AzureSQLDB
+# ## AzureDBWaitStats, AzureDBResourceStats, AzureDBResourceGovernance, sqlAzureDBDatabaseIO
+#
+# ## A list of queries to include. If not specified, all the above listed queries are used.
+# # include_query = []
+#
+# ## A list of queries to explicitly ignore.
+# exclude_query = [ 'Schedulers' , 'SqlRequests']
+
+
+# # Gather timeseries from Google Cloud Platform v3 monitoring API
+# [[inputs.stackdriver]]
+# ## GCP Project
+# project = "erudite-bloom-151019"
+#
+# ## Include timeseries that start with the given metric type.
+# metric_type_prefix_include = [
+# "compute.googleapis.com/",
+# ]
+#
+# ## Exclude timeseries that start with the given metric type.
+# # metric_type_prefix_exclude = []
+#
+# ## Many metrics are updated once per minute; it is recommended to override
+# ## the agent level interval with a value of 1m or greater.
+# interval = "1m"
+#
+# ## Maximum number of API calls to make per second. The quota for accounts
+# ## varies, it can be viewed on the API dashboard:
+# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits
+# # rate_limit = 14
+#
+# ## The delay and window options control the number of points selected on
+# ## each gather. When set, metrics are gathered between:
+# ## start: now() - delay - window
+# ## end: now() - delay
+# #
+# ## Collection delay; if set too low metrics may not yet be available.
+# # delay = "5m"
+# #
+# ## If unset, the window will start at 1m and be updated dynamically to span
+# ## the time between calls (approximately the length of the plugin interval).
+# # window = "1m"
+#
+# ## TTL for cached list of metric types. This is the maximum amount of time
+# ## it may take to discover new metrics.
+# # cache_ttl = "1h"
+#
+# ## If true, raw bucket counts are collected for distribution value types.
+# ## For a more lightweight collection, you may wish to disable and use
+# ## distribution_aggregation_aligners instead.
+# # gather_raw_distribution_buckets = true
+#
+# ## Aggregate functions to be used for metrics whose value type is
+# ## distribution. These aggregate values are recorded in addition to raw
+# ## bucket counts; if they are enabled.
+# ##
+# ## For a list of aligner strings see:
+# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
+# # distribution_aggregation_aligners = [
+# # "ALIGN_PERCENTILE_99",
+# # "ALIGN_PERCENTILE_95",
+# # "ALIGN_PERCENTILE_50",
+# # ]
+#
+# ## Filters can be added to reduce the number of time series matched. All
+# ## functions are supported: starts_with, ends_with, has_substring, and
+# ## one_of. Only the '=' operator is supported.
+# ##
+# ## The logical operators when combining filters are defined statically using
+# ## the following values:
+# ## filter ::= {AND }
+# ## resource_labels ::= {OR }
+# ## metric_labels ::= {OR }
+# ##
+# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
+# #
+# ## Resource labels refine the time series selection with the following expression:
+# ## resource.labels. =
+# # [[inputs.stackdriver.filter.resource_labels]]
+# # key = "instance_name"
+# # value = 'starts_with("localhost")'
+# #
+# ## Metric labels refine the time series selection with the following expression:
+# ## metric.labels. =
+# # [[inputs.stackdriver.filter.metric_labels]]
+# # key = "device_name"
+# # value = 'one_of("sda", "sdb")'
+
+
+# # Get synproxy counter statistics from procfs
+# [[inputs.synproxy]]
+# # no configuration
+
+
+# # Sysstat metrics collector
+# [[inputs.sysstat]]
+# ## Path to the sadc command.
+# #
+# ## Common Defaults:
+# ## Debian/Ubuntu: /usr/lib/sysstat/sadc
+# ## Arch: /usr/lib/sa/sadc
+# ## RHEL/CentOS: /usr/lib64/sa/sadc
+# sadc_path = "/usr/lib/sa/sadc" # required
+#
+# ## Path to the sadf command, if it is not in PATH
+# # sadf_path = "/usr/bin/sadf"
+#
+# ## Activities is a list of activities, that are passed as argument to the
+# ## sadc collector utility (e.g: DISK, SNMP etc...)
+# ## The more activities that are added, the more data is collected.
+# # activities = ["DISK"]
+#
+# ## Group metrics to measurements.
+# ##
+# ## If group is false each metric will be prefixed with a description
+# ## and represents itself a measurement.
+# ##
+# ## If Group is true, corresponding metrics are grouped to a single measurement.
+# # group = true
+#
+# ## Options for the sadf command. The values on the left represent the sadf
+# ## options and the values on the right their description (which are used for
+# ## grouping and prefixing metrics).
+# ##
+# ## Run 'sar -h' or 'man sar' to find out the supported options for your
+# ## sysstat version.
+# [inputs.sysstat.options]
+# -C = "cpu"
+# -B = "paging"
+# -b = "io"
+# -d = "disk" # requires DISK activity
+# "-n ALL" = "network"
+# "-P ALL" = "per_cpu"
+# -q = "queue"
+# -R = "mem"
+# -r = "mem_util"
+# -S = "swap_util"
+# -u = "cpu_util"
+# -v = "inode"
+# -W = "swap"
+# -w = "task"
+# # -H = "hugepages" # only available for newer linux distributions
+# # "-I ALL" = "interrupts" # requires INT activity
+#
+# ## Device tags can be used to add additional tags for devices.
+# ## For example the configuration below adds a tag vg with value rootvg for
+# ## all metrics with sda devices.
+# # [[inputs.sysstat.device_tags.sda]]
+# # vg = "rootvg"
+
+
+# # Gather systemd units state
+# [[inputs.systemd_units]]
+# ## Set timeout for systemctl execution
+# # timeout = "1s"
+# #
+# ## Filter for a specific unit type, default is "service", other possible
+# ## values are "socket", "target", "device", "mount", "automount", "swap",
+# ## "timer", "path", "slice" and "scope ":
+# # unittype = "service"
+
+
+# # Reads metrics from a Teamspeak 3 Server via ServerQuery
+# [[inputs.teamspeak]]
+# ## Server address for Teamspeak 3 ServerQuery
+# # server = "127.0.0.1:10011"
+# ## Username for ServerQuery
+# username = "serverqueryuser"
+# ## Password for ServerQuery
+# password = "secret"
+# ## Array of virtual servers
+# # virtual_servers = [1]
+
+
+# # Read metrics about temperature
+# [[inputs.temp]]
+# # no configuration
+
+
+# # Read Tengine's basic status information (ngx_http_reqstat_module)
+# [[inputs.tengine]]
+# # An array of Tengine reqstat module URI to gather stats.
+# urls = ["http://127.0.0.1/us"]
+#
+# # HTTP response timeout (default: 5s)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.cer"
+# # tls_key = "/etc/telegraf/key.key"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Gather metrics from the Tomcat server status page.
+# [[inputs.tomcat]]
+# ## URL of the Tomcat server status
+# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
+#
+# ## HTTP Basic Auth Credentials
+# # username = "tomcat"
+# # password = "s3cret"
+#
+# ## Request timeout
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Inserts sine and cosine waves for demonstration purposes
+# [[inputs.trig]]
+# ## Set the amplitude
+# amplitude = 10.0
+
+
+# # Read Twemproxy stats data
+# [[inputs.twemproxy]]
+# ## Twemproxy stats address and port (no scheme)
+# addr = "localhost:22222"
+# ## Monitor pool name
+# pools = ["redis_pool", "mc_pool"]
+
+
+# # A plugin to collect stats from the Unbound DNS resolver
+# [[inputs.unbound]]
+# ## Address of server to connect to, read from unbound conf default, optionally ':port'
+# ## Will lookup IP if given a hostname
+# server = "127.0.0.1:8953"
+#
+# ## If running as a restricted user you can prepend sudo for additional access:
+# # use_sudo = false
+#
+# ## The default location of the unbound-control binary can be overridden with:
+# # binary = "/usr/sbin/unbound-control"
+#
+# ## The default location of the unbound config file can be overridden with:
+# # config_file = "/etc/unbound/unbound.conf"
+#
+# ## The default timeout of 1s can be overridden with:
+# # timeout = "1s"
+#
+# ## When set to true, thread metrics are tagged with the thread id.
+# ##
+# ## The default is false for backwards compatibility, and will be changed to
+# ## true in a future version. It is recommended to set to true on new
+# ## deployments.
+# thread_as_tag = false
+
+
+# # Read uWSGI metrics.
+# [[inputs.uwsgi]]
+# ## List with urls of uWSGI Stats servers. URL must match pattern:
+# ## scheme://address[:port]
+# ##
+# ## For example:
+# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
+# servers = ["tcp://127.0.0.1:1717"]
+#
+# ## General connection timeout
+# # timeout = "5s"
+
+
+# # A plugin to collect stats from Varnish HTTP Cache
+# [[inputs.varnish]]
+# ## If running as a restricted user you can prepend sudo for additional access:
+# #use_sudo = false
+#
+# ## The default location of the varnishstat binary can be overridden with:
+# binary = "/usr/bin/varnishstat"
+#
+# ## By default, telegraf gather stats for 3 metric points.
+# ## Setting stats will override the defaults shown below.
+# ## Glob matching can be used, ie, stats = ["MAIN.*"]
+# ## stats may also be set to ["*"], which will collect all stats
+# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
+#
+# ## Optional name for the varnish instance (or working directory) to query
+# ## Usually append after -n in varnish cli
+# # instance_name = instanceName
+#
+# ## Timeout for varnishstat command
+# # timeout = "1s"
+
+
+# # Collect Wireguard server interface and peer statistics
+# [[inputs.wireguard]]
+# ## Optional list of Wireguard device/interface names to query.
+# ## If omitted, all Wireguard interfaces are queried.
+# # devices = ["wg0"]
+
+
+# # Monitor wifi signal strength and quality
+# [[inputs.wireless]]
+# ## Sets 'proc' directory path
+# ## If not specified, then default is /proc
+# # host_proc = "/proc"
+
+
+# # Reads metrics from a SSL certificate
+# [[inputs.x509_cert]]
+# ## List certificate sources
+# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"]
+#
+# ## Timeout for SSL connection
+# # timeout = "5s"
+#
+# ## Pass a different name into the TLS request (Server Name Indication)
+# ## example: server_name = "myhost.example.org"
+# # server_name = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+
+
+# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
+# [[inputs.zfs]]
+# ## ZFS kstat path. Ignored on FreeBSD
+# ## If not specified, then default is:
+# # kstatPath = "/proc/spl/kstat/zfs"
+#
+# ## By default, telegraf gather all zfs stats
+# ## If not specified, then default is:
+# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
+# ## For Linux, the default is:
+# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
+# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
+# ## By default, don't gather zpool stats
+# # poolMetrics = false
+
+
+# # Reads 'mntr' stats from one or many zookeeper servers
+# [[inputs.zookeeper]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
+#
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no port is specified, 2181 is used
+# servers = [":2181"]
+#
+# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## If false, skip chain & host verification
+# # insecure_skip_verify = true
+
+
+###############################################################################
+# SERVICE INPUT PLUGINS #
+###############################################################################
+
+
+# # AMQP consumer plugin
+# [[inputs.amqp_consumer]]
+# ## Broker to consume from.
+# ## deprecated in 1.7; use the brokers option
+# # url = "amqp://localhost:5672/influxdb"
+#
+# ## Brokers to consume from. If multiple brokers are specified a random broker
+# ## will be selected anytime a connection is established. This can be
+# ## helpful for load balancing when not using a dedicated load balancer.
+# brokers = ["amqp://localhost:5672/influxdb"]
+#
+# ## Authentication credentials for the PLAIN auth_method.
+# # username = ""
+# # password = ""
+#
+# ## Name of the exchange to declare. If unset, no exchange will be declared.
+# exchange = "telegraf"
+#
+# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
+# # exchange_type = "topic"
+#
+# ## If true, exchange will be passively declared.
+# # exchange_passive = false
+#
+# ## Exchange durability can be either "transient" or "durable".
+# # exchange_durability = "durable"
+#
+# ## Additional exchange arguments.
+# # exchange_arguments = { }
+# # exchange_arguments = {"hash_property" = "timestamp"}
+#
+# ## AMQP queue name.
+# queue = "telegraf"
+#
+# ## AMQP queue durability can be "transient" or "durable".
+# queue_durability = "durable"
+#
+# ## If true, queue will be passively declared.
+# # queue_passive = false
+#
+# ## A binding between the exchange and queue using this binding key is
+# ## created. If unset, no binding is created.
+# binding_key = "#"
+#
+# ## Maximum number of messages server should give to the worker.
+# # prefetch_count = 50
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Auth method. PLAIN and EXTERNAL are supported
+# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+# ## described here: https://www.rabbitmq.com/plugins.html
+# # auth_method = "PLAIN"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Content encoding for message payloads, can be set to "gzip" or
+# ## "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read Cassandra metrics through Jolokia
+# [[inputs.cassandra]]
+# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
+# ## jolokia2 plugin instead.
+# ##
+# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
+#
+# context = "/jolokia/read"
+# ## List of cassandra servers exposing jolokia read service
+# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
+# ## List of metrics collected on above servers
+# ## Each metric consists of a jmx path.
+# ## This will collect all heap memory usage metrics from the jvm and
+# ## ReadLatency metrics for all keyspaces and tables.
+# ## "type=Table" in the query works with Cassandra3.0. Older versions might
+# ## need to use "type=ColumnFamily"
+# metrics = [
+# "/java.lang:type=Memory/HeapMemoryUsage",
+# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
+# ]
+
+
+# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms
+# [[inputs.cisco_telemetry_mdt]]
+# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
+# ## using the grpc transport.
+# transport = "grpc"
+#
+# ## Address and port to host telemetry listener
+# service_address = ":57000"
+#
+# ## Enable TLS; grpc transport only.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Enable TLS client authentication and define allowed CA certificates; grpc
+# ## transport only.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
+# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
+#
+# ## Define aliases to map telemetry encoding paths to simple measurement names
+# [inputs.cisco_telemetry_mdt.aliases]
+# ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+
+
+# # Read metrics from one or many ClickHouse servers
+# [[inputs.clickhouse]]
+# ## Username for authorization on ClickHouse server
+# ## example: username = "default"
+# username = "default"
+#
+# ## Password for authorization on ClickHouse server
+# ## example: password = "super_secret"
+#
+# ## HTTP(s) timeout while getting metrics values
+# ## The timeout includes connection time, any redirects, and reading the response body.
+# ## example: timeout = 1s
+# # timeout = 5s
+#
+# ## List of servers for metrics scraping
+# ## metrics scrape via HTTP(s) clickhouse interface
+# ## https://clickhouse.tech/docs/en/interfaces/http/
+# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+# servers = ["http://127.0.0.1:8123"]
+#
+# ## If "auto_discovery" is "true" plugin tries to connect to all servers available in the cluster
+# ## with using same "user:password" described in "user" and "password" parameters
+# ## and get this server hostname list from "system.clusters" table
+# ## see
+# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+# ## example: auto_discovery = false
+# # auto_discovery = true
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter present then "WHERE cluster IN (...)" filter will apply
+# ## please use only full cluster names here, regexp and glob filters are not allowed
+# ## for "/etc/clickhouse-server/config.d/remote.xml"
+# ## <yandex>
+# ##  <remote_servers>
+# ##    <my-own-cluster>
+# ##        <shard>
+# ##          <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
+# ##          <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
+# ##        </shard>
+# ##        <shard>
+# ##          <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
+# ##          <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
+# ##        </shard>
+# ##    </my-own-cluster>
+# ##  </remote_servers>
+# ##
+# ## </yandex>
+# ##
+# ## example: cluster_include = ["my-own-cluster"]
+# # cluster_include = []
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply
+# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+# # cluster_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from Google PubSub
+# [[inputs.cloud_pubsub]]
+# ## Required. Name of Google Cloud Platform (GCP) Project that owns
+# ## the given PubSub subscription.
+# project = "my-project"
+#
+# ## Required. Name of PubSub subscription to ingest metrics from.
+# subscription = "my-subscription"
+#
+# ## Required. Data format to consume.
+# ## Each data format has its own unique set of configuration options.
+# ## Read more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+# ## Application Default Credentials, which is preferred.
+# # credentials_file = "path/to/my/creds.json"
+#
+# ## Optional. Number of seconds to wait before attempting to restart the
+# ## PubSub subscription receiver after an unexpected error.
+# ## If the streaming pull for a PubSub Subscription fails (receiver),
+# ## the agent attempts to restart receiving messages after this many seconds.
+# # retry_delay_seconds = 5
+#
+# ## Optional. Maximum byte length of a message to consume.
+# ## Larger messages are dropped with an error. If less than 0 or unspecified,
+# ## treated as no limit.
+# # max_message_len = 1000000
+#
+# ## Optional. Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## The following are optional Subscription ReceiveSettings in PubSub.
+# ## Read more about these values:
+# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
+#
+# ## Optional. Maximum number of seconds for which a PubSub subscription
+# ## should auto-extend the PubSub ACK deadline for each message. If less than
+# ## 0, auto-extension is disabled.
+# # max_extension = 0
+#
+# ## Optional. Maximum number of unprocessed messages in PubSub
+# ## (unacknowledged but not yet expired in PubSub).
+# ## A value of 0 is treated as the default PubSub value.
+# ## Negative values will be treated as unlimited.
+# # max_outstanding_messages = 0
+#
+# ## Optional. Maximum size in bytes of unprocessed messages in PubSub
+# ## (unacknowledged but not yet expired in PubSub).
+# ## A value of 0 is treated as the default PubSub value.
+# ## Negative values will be treated as unlimited.
+# # max_outstanding_bytes = 0
+#
+# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
+# ## to pull messages from PubSub concurrently. This limit applies to each
+# ## subscription separately and is treated as the PubSub default if less than
+# ## 1. Note this setting does not limit the number of messages that can be
+# ## processed concurrently (use "max_outstanding_messages" instead).
+# # max_receiver_go_routines = 0
+#
+# ## Optional. If true, Telegraf will attempt to base64 decode the
+# ## PubSub message data before parsing
+# # base64_data = false
+
+
+# # Google Cloud Pub/Sub Push HTTP listener
+# [[inputs.cloud_pubsub_push]]
+# ## Address and port to host HTTP listener on
+# service_address = ":8080"
+#
+# ## Application secret to verify messages originate from Cloud Pub/Sub
+# # token = ""
+#
+# ## Path to listen to.
+# # path = "/"
+#
+# ## Maximum duration before timing out read of the request
+# # read_timeout = "10s"
+# ## Maximum duration before timing out write of the response. This should be set to a value
+# ## large enough that you can send at least 'metric_batch_size' number of messages within the
+# ## duration.
+# # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# # max_body_size = "500MB"
+#
+# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
+# # add_meta = false
+#
+# ## Optional. Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read logging output from the Docker engine
+# [[inputs.docker_log]]
+# ## Docker Endpoint
+# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
+# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
+# # endpoint = "unix:///var/run/docker.sock"
+#
+# ## When true, container logs are read from the beginning; otherwise
+# ## reading begins at the end of the log.
+# # from_beginning = false
+#
+# ## Timeout for Docker API calls.
+# # timeout = "5s"
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# # container_name_include = []
+# # container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "running" state will be captured.
+# # container_state_include = []
+# # container_state_exclude = []
+#
+# ## docker labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# # docker_label_include = []
+# # docker_label_exclude = []
+#
+# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+# source_tag = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Azure Event Hubs service input plugin
+# [[inputs.eventhub_consumer]]
+# ## The default behavior is to create a new Event Hub client from environment variables.
+# ## This requires one of the following sets of environment variables to be set:
+# ##
+# ## 1) Expected Environment Variables:
+# ## - "EVENTHUB_NAMESPACE"
+# ## - "EVENTHUB_NAME"
+# ## - "EVENTHUB_CONNECTION_STRING"
+# ##
+# ## 2) Expected Environment Variables:
+# ## - "EVENTHUB_NAMESPACE"
+# ## - "EVENTHUB_NAME"
+# ## - "EVENTHUB_KEY_NAME"
+# ## - "EVENTHUB_KEY_VALUE"
+#
+# ## Uncommenting the option below will create an Event Hub client based solely on the connection string.
+# ## This can either be the associated environment variable or hard coded directly.
+# # connection_string = ""
+#
+# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
+# # persistence_dir = ""
+#
+# ## Change the default consumer group
+# # consumer_group = ""
+#
+# ## By default the event hub receives all messages present on the broker, alternative modes can be set below.
+# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339).
+# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run).
+# # from_timestamp =
+# # latest = true
+#
+# ## Set a custom prefetch count for the receiver(s)
+# # prefetch_count = 1000
+#
+# ## Add an epoch to the receiver(s)
+# # epoch = 0
+#
+# ## Change to set a custom user agent, "telegraf" is used by default
+# # user_agent = "telegraf"
+#
+# ## To consume from a specific partition, set the partition_ids option.
+# ## An empty array will result in receiving from all partitions.
+# # partition_ids = ["0","1"]
+#
+# ## Max undelivered messages
+# # max_undelivered_messages = 1000
+#
+# ## Set either option below to true to use a system property as timestamp.
+# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime.
+# ## It is recommended to use this setting when the data itself has no timestamp.
+# # enqueued_time_as_ts = true
+# # iot_hub_enqueued_time_as_ts = true
+#
+# ## Tags or fields to create from keys present in the application property bag.
+# ## These could for example be set by message enrichments in Azure IoT Hub.
+# # application_property_tags = []
+# # application_property_fields = []
+#
+# ## Tag or field name to use for metadata
+# ## By default all metadata is disabled
+# # sequence_number_field = "SequenceNumber"
+# # enqueued_time_field = "EnqueuedTime"
+# # offset_field = "Offset"
+# # partition_id_tag = "PartitionID"
+# # partition_key_tag = "PartitionKey"
+# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID"
+# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID"
+# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod"
+# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID"
+# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Run executable as long-running input plugin
+# [[inputs.execd]]
+# ## Program to run as daemon
+# command = ["telegraf-smartctl", "-d", "/dev/sda"]
+#
+# ## Define how the process is signaled on each collection interval.
+# ## Valid values are:
+# ## "none" : Do not signal anything.
+# ## The process must output metrics by itself.
+# ## "STDIN" : Send a newline on STDIN.
+# ## "SIGHUP" : Send a HUP signal. Not available on Windows.
+# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows.
+# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows.
+# signal = "none"
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # gNMI telemetry input plugin
+# [[inputs.gnmi]]
+# ## Address and port of the gNMI GRPC server
+# addresses = ["10.49.234.114:57777"]
+#
+# ## define credentials
+# username = "cisco"
+# password = "cisco"
+#
+# ## gNMI encoding requested (one of: "proto", "json", "json_ietf")
+# # encoding = "proto"
+#
+# ## redial in case of failures after
+# redial = "10s"
+#
+# ## enable client-side TLS and define CA to authenticate the device
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # insecure_skip_verify = true
+#
+# ## define client-side TLS certificate & key to authenticate to the device
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## gNMI subscription prefix (optional, can usually be left empty)
+# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+# # origin = ""
+# # prefix = ""
+# # target = ""
+#
+# ## Define additional aliases to map telemetry encoding paths to simple measurement names
+# #[inputs.gnmi.aliases]
+# # ifcounters = "openconfig:/interfaces/interface/state/counters"
+#
+# [[inputs.gnmi.subscription]]
+# ## Name of the measurement that will be emitted
+# name = "ifcounters"
+#
+# ## Origin and path of the subscription
+# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+# ##
+# ## origin usually refers to a (YANG) data model implemented by the device
+# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
+# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr
+# origin = "openconfig-interfaces"
+# path = "/interfaces/interface/state/counters"
+#
+# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval
+# subscription_mode = "sample"
+# sample_interval = "10s"
+#
+# ## Suppress redundant transmissions when measured values are unchanged
+# # suppress_redundant = false
+#
+# ## If suppression is enabled, send updates at least every X seconds anyway
+# # heartbeat_interval = "60s"
+
+
+# # Accept metrics over InfluxDB 1.x HTTP API
+# [[inputs.http_listener]]
+# ## Address and port to host InfluxDB listener on
+# service_address = ":8186"
+#
+# ## maximum duration before timing out read of the request
+# read_timeout = "10s"
+# ## maximum duration before timing out write of the response
+# write_timeout = "10s"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# max_body_size = "32MiB"
+#
+# ## Optional tag name used to store the database.
+# ## If the write has a database in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# # database_tag = ""
+#
+# ## If set the retention policy specified in the write query will be added as
+# ## the value of this tag name.
+# # retention_policy_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# tls_cert = "/etc/telegraf/cert.pem"
+# tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional username and password to accept for HTTP basic authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+
+
+# # Generic HTTP write listener
+# [[inputs.http_listener_v2]]
+# ## Address and port to host HTTP listener on
+# service_address = ":8080"
+#
+# ## Path to listen to.
+# # path = "/telegraf"
+#
+# ## HTTP methods to accept.
+# # methods = ["POST", "PUT"]
+#
+# ## maximum duration before timing out read of the request
+# # read_timeout = "10s"
+# ## maximum duration before timing out write of the response
+# # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# # max_body_size = "500MB"
+#
+# ## Part of the request to consume. Available options are "body" and
+# ## "query".
+# # data_source = "body"
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional username and password to accept for HTTP basic authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+#
+# ## Optional setting to map http headers into tags
+# ## If the http header is not present on the request, no corresponding tag will be added
+# ## If multiple instances of the http header are present, only the first value will be used
+# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Accept metrics over InfluxDB 1.x HTTP API
+# [[inputs.influxdb_listener]]
+# ## Address and port to host InfluxDB listener on
+# service_address = ":8186"
+#
+# ## maximum duration before timing out read of the request
+# read_timeout = "10s"
+# ## maximum duration before timing out write of the response
+# write_timeout = "10s"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# max_body_size = "32MiB"
+#
+# ## Optional tag name used to store the database.
+# ## If the write has a database in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# # database_tag = ""
+#
+# ## If set the retention policy specified in the write query will be added as
+# ## the value of this tag name.
+# # retention_policy_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# tls_cert = "/etc/telegraf/cert.pem"
+# tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional username and password to accept for HTTP basic authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+
+
+# # Accept metrics over InfluxDB 2.x HTTP API
+# [[inputs.influxdb_v2_listener]]
+# ## Address and port to host InfluxDB listener on
+# ## (Double check the port. Could be 9999 if using OSS Beta)
+# service_address = ":8086"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# # max_body_size = "32MiB"
+#
+# ## Optional tag to determine the bucket.
+# ## If the write has a bucket in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# # bucket_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional token to accept for HTTP authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # token = "some-long-shared-secret-token"
+
+
+# # Intel Resource Director Technology plugin
+# [[inputs.intel_rdt]]
+# ## Optionally set sampling interval to Nx100ms.
+# ## This value is propagated to pqos tool. Interval format is defined by pqos itself.
+# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s.
+# # sampling_interval = "10"
+#
+# ## Optionally specify the path to pqos executable.
+# ## If not provided, auto discovery will be performed.
+# # pqos_path = "/usr/local/bin/pqos"
+#
+# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated.
+# ## If not provided, default value is false.
+# # shortened_metrics = false
+#
+# ## Specify the list of groups of CPU core(s) to be provided as pqos input.
+# ## Mandatory if processes aren't set and forbidden if processes are specified.
+# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"]
+# # cores = ["0-3"]
+#
+# ## Specify the list of processes for which Metrics will be collected.
+# ## Mandatory if cores aren't set and forbidden if cores are specified.
+# ## e.g. ["qemu", "pmd"]
+# # processes = ["process"]
+
+
+# # Read JTI OpenConfig Telemetry from listed sensors
+# [[inputs.jti_openconfig_telemetry]]
+# ## List of device addresses to collect telemetry from
+# servers = ["localhost:1883"]
+#
+# ## Authentication details. Username and password are must if device expects
+# ## authentication. Client ID must be unique when connecting from multiple instances
+# ## of telegraf to the same device
+# username = "user"
+# password = "pass"
+# client_id = "telegraf"
+#
+# ## Frequency to get data
+# sample_frequency = "1000ms"
+#
+# ## Sensors to subscribe for
+# ## An identifier for each sensor can be provided in path by separating with space
+# ## Else sensor path will be used as identifier
+# ## When identifier is used, we can provide a list of space separated sensors.
+# ## A single subscription will be created with all these sensors and data will
+# ## be saved to measurement with this identifier name
+# sensors = [
+# "/interfaces/",
+# "collection /components/ /lldp",
+# ]
+#
+# ## We allow specifying sensor group level reporting rate. To do this, specify the
+# ## reporting rate in Duration at the beginning of sensor paths / collection
+# ## name. For entries without reporting rate, we use configured sample frequency
+# sensors = [
+# "1000ms customReporting /interfaces /lldp",
+# "2000ms collection /components",
+# "/interfaces",
+# ]
+#
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
+# ## Failed streams/calls will not be retried if 0 is provided
+# retry_delay = "1000ms"
+#
+# ## To treat all string values as tags, set this to true
+# str_as_tags = false
+
+
+# # Read metrics from Kafka topics
+# [[inputs.kafka_consumer]]
+# ## Kafka brokers.
+# brokers = ["localhost:9092"]
+#
+# ## Topics to consume.
+# topics = ["telegraf"]
+#
+# ## When set this tag will be added to all metrics with the topic as the value.
+# # topic_tag = ""
+#
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Must be 0.10.2.0 or greater.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## SASL authentication credentials. These settings should typically be used
+# ## with TLS encryption enabled using the "enable_tls" option.
+# # sasl_username = "kafka"
+# # sasl_password = "secret"
+#
+# ## SASL protocol version. When connecting to Azure EventHub set to 0.
+# # sasl_version = 1
+#
+# ## Name of the consumer group.
+# # consumer_group = "telegraf_metrics_consumers"
+#
+# ## Initial offset position; one of "oldest" or "newest".
+# # offset = "oldest"
+#
+# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
+# # balance_strategy = "range"
+#
+# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
+# ## larger messages are dropped
+# max_message_len = 1000000
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read metrics from Kafka topic(s)
+# [[inputs.kafka_consumer_legacy]]
+# ## topic(s) to consume
+# topics = ["telegraf"]
+#
+# ## an array of Zookeeper connection strings
+# zookeeper_peers = ["localhost:2181"]
+#
+# ## Zookeeper Chroot
+# zookeeper_chroot = ""
+#
+# ## the name of the consumer group
+# consumer_group = "telegraf_metrics_consumers"
+#
+# ## Offset (must be either "oldest" or "newest")
+# offset = "oldest"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
+# ## larger messages are dropped
+# max_message_len = 65536
+
+
+# # Configuration for the AWS Kinesis input.
+# [[inputs.kinesis_consumer]]
+# ## Amazon REGION of kinesis endpoint.
+# region = "ap-southeast-2"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
+# # access_key = ""
+# # secret_key = ""
+# # token = ""
+# # role_arn = ""
+# # profile = ""
+# # shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Kinesis StreamName must exist prior to starting telegraf.
+# streamname = "StreamName"
+#
+# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported)
+# # shard_iterator_type = "TRIM_HORIZON"
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional
+# ## Configuration for a dynamodb checkpoint
+# [inputs.kinesis_consumer.checkpoint_dynamodb]
+# ## unique name for this consumer
+# app_name = "default"
+# table_name = "default"
+
+
+# # Read metrics off Arista LANZ, via socket
+# [[inputs.lanz]]
+# ## URL to Arista LANZ endpoint
+# servers = [
+# "tcp://127.0.0.1:50001"
+# ]
+
+
+# # Stream and parse log file(s).
+# [[inputs.logparser]]
+# ## Log files to parse.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/**.log -> recursively find all .log files in /var/log
+# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
+# ## /var/log/apache.log -> only tail the apache log file
+# files = ["/var/log/apache/access.log"]
+#
+# ## Read files that currently exist from the beginning. Files that are created
+# ## while telegraf is running (and that match the "files" globs) will always
+# ## be read from the beginning.
+# from_beginning = false
+#
+# ## Method used to watch for file updates. Can be either "inotify" or "poll".
+# # watch_method = "inotify"
+#
+# ## Parse logstash-style "grok" patterns:
+# [inputs.logparser.grok]
+# ## This is a list of patterns to check the given log file(s) for.
+# ## Note that adding patterns here increases processing time. The most
+# ## efficient configuration is to have one pattern per logparser.
+# ## Other common built-in patterns are:
+# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
+# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
+# patterns = ["%{COMBINED_LOG_FORMAT}"]
+#
+# ## Name of the outputted measurement name.
+# measurement = "apache_access_log"
+#
+# ## Full path(s) to custom pattern files.
+# custom_pattern_files = []
+#
+# ## Custom patterns can also be defined here. Put one pattern per line.
+# custom_patterns = '''
+# '''
+#
+# ## Timezone allows you to provide an override for timestamps that
+# ## don't already include an offset
+# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
+# ##
+# ## Default: "" which renders UTC
+# ## Options are as follows:
+# ## 1. Local -- interpret based on machine localtime
+# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
+# # timezone = "Canada/Eastern"
+#
+# ## When set to "disable", timestamp will not be incremented if there is a
+# ## duplicate.
+# # unique_timestamp = "auto"
+
+
+# # Read metrics from MQTT topic(s)
+# [[inputs.mqtt_consumer]]
+# ## Broker URLs for the MQTT server or cluster. To connect to multiple
+# ## clusters or standalone servers, use a separate plugin instance.
+# ## example: servers = ["tcp://localhost:1883"]
+# ## servers = ["ssl://localhost:1883"]
+# ## servers = ["ws://localhost:1883"]
+# servers = ["tcp://127.0.0.1:1883"]
+#
+# ## Topics that will be subscribed to.
+# topics = [
+# "telegraf/host01/cpu",
+# "telegraf/+/mem",
+# "sensors/#",
+# ]
+#
+# ## The message topic will be stored in a tag specified by this value. If set
+# ## to the empty string no topic tag will be created.
+# # topic_tag = "topic"
+#
+# ## QoS policy for messages
+# ## 0 = at most once
+# ## 1 = at least once
+# ## 2 = exactly once
+# ##
+# ## When using a QoS of 1 or 2, you should enable persistent_session to allow
+# ## resuming unacknowledged messages.
+# # qos = 0
+#
+# ## Connection timeout for initial connection in seconds
+# # connection_timeout = "30s"
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Persistent session disables clearing of the client session on connection.
+# ## In order for this option to work you must also set client_id to identify
+# ## the client. To receive messages that arrived while the client is offline,
+# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
+# ## publishing.
+# # persistent_session = false
+#
+# ## If unset, a random client ID will be generated.
+# # client_id = ""
+#
+# ## Username and password to connect MQTT server.
+# # username = "telegraf"
+# # password = "metricsmetricsmetricsmetrics"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read metrics from NATS subject(s)
+# [[inputs.nats_consumer]]
+# ## urls of NATS servers
+# servers = ["nats://localhost:4222"]
+#
+# ## subject(s) to consume
+# subjects = ["telegraf"]
+#
+# ## name a queue group
+# queue_group = "telegraf_consumers"
+#
+# ## Optional credentials
+# # username = ""
+# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
+# ## Use Transport Layer Security
+# # secure = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Sets the limits for pending msgs and bytes for each subscription
+# ## These shouldn't need to be adjusted except in very high throughput scenarios
+# # pending_message_limit = 65536
+# # pending_bytes_limit = 67108864
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read NSQ topic for metrics.
+# [[inputs.nsq_consumer]]
+# ## Server option still works but is deprecated, we just prepend it to the nsqd array.
+# # server = "localhost:4150"
+#
+# ## An array representing the NSQD TCP HTTP Endpoints
+# nsqd = ["localhost:4150"]
+#
+# ## An array representing the NSQLookupd HTTP Endpoints
+# nsqlookupd = ["localhost:4161"]
+# topic = "telegraf"
+# channel = "consumer"
+# max_in_flight = 100
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read metrics from one or many pgbouncer servers
+# [[inputs.pgbouncer]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# ##
+# ## All connection parameters are optional.
+# ##
+# address = "host=localhost user=pgbouncer sslmode=disable"
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# ##
+# ## All connection parameters are optional.
+# ##
+# ## Without the dbname parameter, the driver will default to a database
+# ## with the same name as the user. This dbname is just for instantiating a
+# ## connection with the server and doesn't restrict the databases we are trying
+# ## to grab metrics for.
+# ##
+# address = "host=localhost user=postgres sslmode=disable"
+# ## A custom name for the database that will be used as the "server" tag in the
+# ## measurement output. If not specified, a default one generated from
+# ## the connection address is used.
+# # outputaddress = "db01"
+#
+# ## connection configuration.
+# ## maxlifetime - specify the maximum lifetime of a connection.
+# ## default is forever (0s)
+# max_lifetime = "0s"
+#
+# ## A list of databases to explicitly ignore. If not specified, metrics for all
+# ## databases are gathered. Do NOT use with the 'databases' option.
+# # ignored_databases = ["postgres", "template0", "template1"]
+#
+# ## A list of databases to pull metrics about. If not specified, metrics for all
+# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
+# # databases = ["app_production", "testing"]
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql_extensible]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# #
+# ## All connection parameters are optional. #
+# ## Without the dbname parameter, the driver will default to a database
+# ## with the same name as the user. This dbname is just for instantiating a
+# ## connection with the server and doesn't restrict the databases we are trying
+# ## to grab metrics for.
+# #
+# address = "host=localhost user=postgres sslmode=disable"
+#
+# ## connection configuration.
+# ## maxlifetime - specify the maximum lifetime of a connection.
+# ## default is forever (0s)
+# max_lifetime = "0s"
+#
+# ## A list of databases to pull metrics about. If not specified, metrics for all
+# ## databases are gathered.
+# ## databases = ["app_production", "testing"]
+# #
+# ## A custom name for the database that will be used as the "server" tag in the
+# ## measurement output. If not specified, a default one generated from
+# ## the connection address is used.
+# # outputaddress = "db01"
+# #
+# ## Define the toml config where the sql queries are stored
+# ## New queries can be added, if the withdbname is set to true and there is no
+# ## databases defined in the 'databases field', the sql query is ended by a
+# ## 'is not null' in order to make the query succeed.
+# ## Example :
+# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" becomes
+# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
+# ## because the databases variable was set to ['postgres', 'pgbench' ] and the
+# ## withdbname was true. Be careful that if the withdbname is set to false you
+# ## don't have to define the where clause (aka with the dbname) the tagvalue
+# ## field is used to define custom tags (separated by commas)
+# ## The optional "measurement" value can be used to override the default
+# ## output measurement name ("postgresql").
+# ##
+# ## The script option can be used to specify the .sql file path.
+# ## If script and sqlquery options specified at same time, sqlquery will be used
+# ##
+# ## Structure :
+# ## [[inputs.postgresql_extensible.query]]
+# ## sqlquery string
+# ## version string
+# ## withdbname boolean
+# ## tagvalue string (comma separated)
+# ## measurement string
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_database"
+# version=901
+# withdbname=false
+# tagvalue=""
+# measurement=""
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_bgwriter"
+# version=901
+# withdbname=false
+# tagvalue="postgresql.stats"
+
+
+# # Read metrics from one or many prometheus clients
+# [[inputs.prometheus]]
+# ## An array of urls to scrape metrics from.
+# urls = ["http://localhost:9100/metrics"]
+#
+# ## Metric version controls the mapping from Prometheus metrics into
+# ## Telegraf metrics. When using the prometheus_client output, use the same
+# ## value in both plugins to ensure metrics are round-tripped without
+# ## modification.
+# ##
+# ## example: metric_version = 1; deprecated in 1.13
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## Url tag name (tag containing scraped url. optional, default is "url")
+# # url_tag = "scrapeUrl"
+#
+# ## An array of Kubernetes services to scrape metrics from.
+# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
+#
+# ## Kubernetes config file to create client from.
+# # kube_config = "/path/to/kubernetes.config"
+#
+# ## Scrape Kubernetes pods for the following prometheus annotations:
+# ## - prometheus.io/scrape: Enable scraping for this pod
+# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
+# ## set this to 'https' & most likely set the tls config.
+# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
+# ## - prometheus.io/port: If port is not 9102 use this annotation
+# # monitor_kubernetes_pods = true
+# ## Restricts Kubernetes monitoring to a single namespace
+# ## ex: monitor_kubernetes_pods_namespace = "default"
+# # monitor_kubernetes_pods_namespace = ""
+# # label selector to target pods which have the label
+# # kubernetes_label_selector = "env=dev,app=nginx"
+# # field selector to target pods
+# # eg. To scrape pods on a specific node
+# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## HTTP Basic Authentication username and password. ('bearer_token' and
+# ## 'bearer_token_string' take priority)
+# # username = ""
+# # password = ""
+#
+# ## Specify timeout duration for slower prometheus clients (default is 3s)
+# # response_timeout = "3s"
+#
+# ## Optional TLS Config
+# # tls_ca = /path/to/cafile
+# # tls_cert = /path/to/certfile
+# # tls_key = /path/to/keyfile
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # SFlow V5 Protocol Listener
+# [[inputs.sflow]]
+# ## Address to listen for sFlow packets.
+# ## example: service_address = "udp://:6343"
+# ## service_address = "udp4://:6343"
+# ## service_address = "udp6://:6343"
+# service_address = "udp://:6343"
+#
+# ## Set the size of the operating system's receive buffer.
+# ## example: read_buffer_size = "64KiB"
+# # read_buffer_size = ""
+
+
+# # Receive SNMP traps
+# [[inputs.snmp_trap]]
+# ## Transport, local address, and port to listen on. Transport must
+# ## be "udp://". Omit local address to listen on all interfaces.
+# ## example: "udp://127.0.0.1:1234"
+# ##
+# ## Special permissions may be required to listen on a port less than
+# ## 1024. See README.md for details
+# ##
+# # service_address = "udp://:162"
+# ## Timeout running snmptranslate command
+# # timeout = "5s"
+# ## Snmp version, defaults to 2c
+# # version = "2c"
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA" or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+
+
+[[inputs.socket_listener]]
+ service_address = "udp://127.0.0.1:8094"
+ data_format = "influx"
+
+[[inputs.socket_listener]]
+ service_address = "tcp://127.0.0.1:8094"
+ data_format = "influx"
+
+# # Generic socket listener capable of handling multiple socket types.
+# [[inputs.socket_listener]]
+# ## URL to listen on
+# # service_address = "tcp://:8094"
+# # service_address = "tcp://127.0.0.1:http"
+# # service_address = "tcp4://:8094"
+# # service_address = "tcp6://:8094"
+# # service_address = "tcp6://[2001:db8::1]:8094"
+# # service_address = "udp://:8094"
+# # service_address = "udp4://:8094"
+# # service_address = "udp6://:8094"
+# # service_address = "unix:///tmp/telegraf.sock"
+# # service_address = "unixgram:///tmp/telegraf.sock"
+#
+# ## Change the file mode bits on unix sockets. These permissions may not be
+# ## respected by some platforms, to safely restrict write permissions it is best
+# ## to place the socket into a directory that has previously been created
+# ## with the desired permissions.
+# ## ex: socket_mode = "777"
+# # socket_mode = ""
+#
+# ## Maximum number of concurrent connections.
+# ## Only applies to stream sockets (e.g. TCP).
+# ## 0 (default) is unlimited.
+# # max_connections = 1024
+#
+# ## Read timeout.
+# ## Only applies to stream sockets (e.g. TCP).
+# ## 0 (default) is unlimited.
+# # read_timeout = "30s"
+#
+# ## Optional TLS configuration.
+# ## Only applies to stream sockets (e.g. TCP).
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Maximum socket buffer size (in bytes when no unit specified).
+# ## For stream sockets, once the buffer fills up, the sender will start backing up.
+# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
+# ## Defaults to the OS default.
+# # read_buffer_size = "64KiB"
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+#
+# ## Content encoding for message payloads, can be set to "gzip" to or
+# ## "identity" to apply no encoding.
+# # content_encoding = "identity"
+
+
+# # Statsd UDP/TCP Server
+[[inputs.statsd]]
+ protocol = "tcp"
+ max_tcp_connections = 250
+ tcp_keep_alive = false
+ # tcp_keep_alive_period = "2h"
+ service_address = "127.0.0.1:8125"
+ delete_gauges = true
+ delete_counters = true
+ delete_sets = true
+ delete_timings = true
+ ## Percentiles to calculate for timing & histogram stats.
+ percentiles = [50.0, 75.0, 95.0, 99.0, 99.95, 100.0]
+ metric_separator = "_"
+ datadog_extensions = true
+ allowed_pending_messages = 10000
+ percentile_limit = 1000
+ # read_buffer_size = 65535
+
+# [[inputs.statsd]]
+# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
+# protocol = "udp"
+#
+# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
+# max_tcp_connections = 250
+#
+# ## Enable TCP keep alive probes (default=false)
+# tcp_keep_alive = false
+#
+# ## Specifies the keep-alive period for an active network connection.
+# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
+# ## Defaults to the OS configuration.
+# # tcp_keep_alive_period = "2h"
+#
+# ## Address and port to host UDP listener on
+# service_address = ":8125"
+#
+# ## The following configuration options control when telegraf clears its cache
+# ## of previous values. If set to false, then telegraf will only clear its
+# ## cache when the daemon is restarted.
+# ## Reset gauges every interval (default=true)
+# delete_gauges = true
+# ## Reset counters every interval (default=true)
+# delete_counters = true
+# ## Reset sets every interval (default=true)
+# delete_sets = true
+# ## Reset timings & histograms every interval (default=true)
+# delete_timings = true
+#
+# ## Percentiles to calculate for timing & histogram stats
+# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
+#
+# ## separator to use between elements of a statsd metric
+# metric_separator = "_"
+#
+# ## Parses tags in the datadog statsd format
+# ## http://docs.datadoghq.com/guides/dogstatsd/
+# parse_data_dog_tags = false
+#
+# ## Parses datadog extensions to the statsd format
+# datadog_extensions = false
+#
+# ## Statsd data translation templates, more info can be read here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
+# # templates = [
+# # "cpu.* measurement*"
+# # ]
+#
+# ## Number of UDP messages allowed to queue up, once filled,
+# ## the statsd server will start dropping packets
+# allowed_pending_messages = 10000
+#
+# ## Number of timing/histogram values to track per-measurement in the
+# ## calculation of percentiles. Raising this limit increases the accuracy
+# ## of percentiles but also increases the memory usage and cpu time.
+# percentile_limit = 1000
+
+
+# # Suricata stats plugin
+# [[inputs.suricata]]
+# ## Data sink for Suricata stats log
+# # This is expected to be a filename of a
+# # unix socket to be created for listening.
+# source = "/var/run/suricata-stats.sock"
+#
+# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
+# # becomes "detect_alert" when delimiter is "_".
+# delimiter = "_"
+
+
+# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
+# [[inputs.syslog]]
+# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
+# ## Protocol, address and port to host the syslog receiver.
+# ## If no host is specified, then localhost is used.
+# ## If no port is specified, 6514 is used (RFC5425#section-4.1).
+# server = "tcp://:6514"
+#
+# ## TLS Config
+# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Period between keep alive probes.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# ## Only applies to stream sockets (e.g. TCP).
+# # keep_alive_period = "5m"
+#
+# ## Maximum number of concurrent connections (default = 0).
+# ## 0 means unlimited.
+# ## Only applies to stream sockets (e.g. TCP).
+# # max_connections = 1024
+#
+# ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
+# ## 0 means unlimited.
+# # read_timeout = "5s"
+#
+# ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
+# ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+# ## or the non-transparent framing technique (RFC6587#section-3.4.2).
+# ## Must be one of "octet-counting", "non-transparent".
+# # framing = "octet-counting"
+#
+# ## The trailer to be expected in case of non-transparent framing (default = "LF").
+# ## Must be one of "LF", or "NUL".
+# # trailer = "LF"
+#
+# ## Whether to parse in best effort mode or not (default = false).
+# ## By default best effort parsing is off.
+# # best_effort = false
+#
+# ## Character to prepend to SD-PARAMs (default = "_").
+# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
+# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
+# ## For each combination a field is created.
+# ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
+# # sdparam_separator = "_"
+
+
+# # Parse the new lines appended to a file
+# [[inputs.tail]]
+# ## File names or a pattern to tail.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## "/var/log/**.log" -> recursively find all .log files in /var/log
+# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+# ## "/var/log/apache.log" -> just tail the apache log file
+# ##
+# ## See https://github.com/gobwas/glob for more examples
+# ##
+# files = ["/var/mymetrics.out"]
+#
+# ## Read file from beginning.
+# # from_beginning = false
+#
+# ## Whether file is a named pipe
+# # pipe = false
+#
+# ## Method used to watch for file updates. Can be either "inotify" or "poll".
+# # watch_method = "inotify"
+#
+# ## Maximum lines of the file to process that have not yet be written by the
+# ## output. For best throughput set based on the number of metrics on each
+# ## line and the size of the output's metric_batch_size.
+# # max_undelivered_lines = 1000
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## multiline parser/codec
+# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
+# #[inputs.tail.multiline]
+# ## The pattern should be a regexp which matches what you believe to be an
+# ## indicator that the field is part of an event consisting of multiple lines of log data.
+# #pattern = "^\s"
+#
+# ## This field must be either "previous" or "next".
+# ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
+# ## whereas "next" indicates that the line belongs to the next one.
+# #match_which_line = "previous"
+#
+# ## The invert_match field can be true or false (defaults to false).
+# ## If true, a message not matching the pattern will constitute a match of the multiline
+# ## filter and the what will be applied. (vice-versa is also true)
+# #invert_match = false
+#
+# ## After the specified timeout, this plugin sends a multiline event even if no new pattern
+# ## is found to start a new event. The default timeout is 5s.
+# #timeout = 5s
+
+
+# # Generic TCP listener
+# [[inputs.tcp_listener]]
+# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# # Generic UDP listener
+# [[inputs.udp_listener]]
+# # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# # Read metrics from VMware vCenter
+# [[inputs.vsphere]]
+# ## List of vCenter URLs to be monitored. These three lines must be uncommented
+# ## and edited for the plugin to work.
+# vcenters = [ "https://vcenter.local/sdk" ]
+# username = "user@corp.local"
+# password = "secret"
+#
+# ## VMs
+# ## Typical VM metrics (if omitted or empty, all metrics are collected)
+# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+# # vm_exclude = [] # Inventory paths to exclude
+# vm_metric_include = [
+# "cpu.demand.average",
+# "cpu.idle.summation",
+# "cpu.latency.average",
+# "cpu.readiness.average",
+# "cpu.ready.summation",
+# "cpu.run.summation",
+# "cpu.usagemhz.average",
+# "cpu.used.summation",
+# "cpu.wait.summation",
+# "mem.active.average",
+# "mem.granted.average",
+# "mem.latency.average",
+# "mem.swapin.average",
+# "mem.swapinRate.average",
+# "mem.swapout.average",
+# "mem.swapoutRate.average",
+# "mem.usage.average",
+# "mem.vmmemctl.average",
+# "net.bytesRx.average",
+# "net.bytesTx.average",
+# "net.droppedRx.summation",
+# "net.droppedTx.summation",
+# "net.usage.average",
+# "power.power.average",
+# "virtualDisk.numberReadAveraged.average",
+# "virtualDisk.numberWriteAveraged.average",
+# "virtualDisk.read.average",
+# "virtualDisk.readOIO.latest",
+# "virtualDisk.throughput.usage.average",
+# "virtualDisk.totalReadLatency.average",
+# "virtualDisk.totalWriteLatency.average",
+# "virtualDisk.write.average",
+# "virtualDisk.writeOIO.latest",
+# "sys.uptime.latest",
+# ]
+# # vm_metric_exclude = [] ## Nothing is excluded by default
+# # vm_instances = true ## true by default
+#
+# ## Hosts
+# ## Typical host metrics (if omitted or empty, all metrics are collected)
+# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
+# # host_exclude = [] # Inventory paths to exclude
+# host_metric_include = [
+# "cpu.coreUtilization.average",
+# "cpu.costop.summation",
+# "cpu.demand.average",
+# "cpu.idle.summation",
+# "cpu.latency.average",
+# "cpu.readiness.average",
+# "cpu.ready.summation",
+# "cpu.swapwait.summation",
+# "cpu.usage.average",
+# "cpu.usagemhz.average",
+# "cpu.used.summation",
+# "cpu.utilization.average",
+# "cpu.wait.summation",
+# "disk.deviceReadLatency.average",
+# "disk.deviceWriteLatency.average",
+# "disk.kernelReadLatency.average",
+# "disk.kernelWriteLatency.average",
+# "disk.numberReadAveraged.average",
+# "disk.numberWriteAveraged.average",
+# "disk.read.average",
+# "disk.totalReadLatency.average",
+# "disk.totalWriteLatency.average",
+# "disk.write.average",
+# "mem.active.average",
+# "mem.latency.average",
+# "mem.state.latest",
+# "mem.swapin.average",
+# "mem.swapinRate.average",
+# "mem.swapout.average",
+# "mem.swapoutRate.average",
+# "mem.totalCapacity.average",
+# "mem.usage.average",
+# "mem.vmmemctl.average",
+# "net.bytesRx.average",
+# "net.bytesTx.average",
+# "net.droppedRx.summation",
+# "net.droppedTx.summation",
+# "net.errorsRx.summation",
+# "net.errorsTx.summation",
+# "net.usage.average",
+# "power.power.average",
+# "storageAdapter.numberReadAveraged.average",
+# "storageAdapter.numberWriteAveraged.average",
+# "storageAdapter.read.average",
+# "storageAdapter.write.average",
+# "sys.uptime.latest",
+# ]
+# ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
+# # ip_addresses = ["ipv6", "ipv4" ]
+#
+# # host_metric_exclude = [] ## Nothing excluded by default
+# # host_instances = true ## true by default
+#
+#
+# ## Clusters
+# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+# # cluster_exclude = [] # Inventory paths to exclude
+# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
+# # cluster_metric_exclude = [] ## Nothing excluded by default
+# # cluster_instances = false ## false by default
+#
+# ## Datastores
+# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
+# # datastore_exclude = [] # Inventory paths to exclude
+# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
+# # datastore_metric_exclude = [] ## Nothing excluded by default
+# # datastore_instances = false ## false by default
+#
+# ## Datacenters
+# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+# # datacenter_exclude = [] # Inventory paths to exclude
+# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
+# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
+# # datacenter_instances = false ## false by default
+#
+# ## Plugin Settings
+# ## separator character to use for measurement and field names (default: "_")
+# # separator = "_"
+#
+# ## number of objects to retrieve per query for realtime resources (vms and hosts)
+# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+# # max_query_objects = 256
+#
+# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
+# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+# # max_query_metrics = 256
+#
+# ## number of go routines to use for collection and discovery of objects and metrics
+# # collect_concurrency = 1
+# # discover_concurrency = 1
+#
+# ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
+# # object_discovery_interval = "300s"
+#
+# ## timeout applies to any of the api request made to vcenter
+# # timeout = "60s"
+#
+# ## When set to true, all samples are sent as integers. This makes the output
+# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all
+# ## samples from vCenter, with the exception of percentages, are integer
+# ## values, but under some conditions, some averaging takes place internally in
+# ## the plugin. Setting this flag to "false" will send values as floats to
+# ## preserve the full precision when averaging takes place.
+# # use_int_samples = true
+#
+# ## Custom attributes from vCenter can be very useful for queries in order to slice the
+# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled
+# ## by default, since they can add a considerable amount of tags to the resulting metrics. To
+# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+# ## to select the attributes you want to include.
+# ## Custom attribute collection is disabled by default because it can add a
+# ## considerable number of tags to the resulting metrics; enable it only when
+# ## you need those attributes available as tags.
+# # custom_attribute_include = []
+# # custom_attribute_exclude = ["*"]
+#
+# ## Optional SSL Config
+# # ssl_ca = "/path/to/cafile"
+# # ssl_cert = "/path/to/certfile"
+# # ssl_key = "/path/to/keyfile"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # A Webhooks Event collector
+# [[inputs.webhooks]]
+# ## Address and port to host Webhook listener on
+# service_address = ":1619"
+#
+# [inputs.webhooks.filestack]
+# path = "/filestack"
+#
+# [inputs.webhooks.github]
+# path = "/github"
+# # secret = ""
+#
+# [inputs.webhooks.mandrill]
+# path = "/mandrill"
+#
+# [inputs.webhooks.rollbar]
+# path = "/rollbar"
+#
+# [inputs.webhooks.papertrail]
+# path = "/papertrail"
+#
+# [inputs.webhooks.particle]
+# path = "/particle"
+
+
+# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
+# [[inputs.zipkin]]
+# # path = "/api/v1/spans" # URL path for span data
+# # port = 9411 # Port on which Telegraf listens
+
diff --git a/packer/jambonz-mini/gcp/files/vanilla_modules.conf.xml.patch b/packer/jambonz-mini/gcp/files/vanilla_modules.conf.xml.patch
new file mode 100644
index 0000000..57aca31
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/vanilla_modules.conf.xml.patch
@@ -0,0 +1,105 @@
+--- modules.conf.xml 2019-09-30 19:01:33.304020805 +0000
++++ modules.conf.xml.new 2019-09-30 23:11:23.371830901 +0000
+@@ -1,5 +1,6 @@
+
+
++
+
+
+
+@@ -10,7 +11,7 @@
+
+
+
+-
++
+
+
+
+@@ -39,7 +40,7 @@
+
+
+
+-
++
+
+
+
+@@ -47,28 +48,28 @@
+
+
+
+-
++
+
+
+-
++
+
+
+
+-
++
+
+-
+-
+-
++
++
++
+
+-
++
+
+
+
+
+-
+-
++
++
+
+-
++
+
+
+
+@@ -87,7 +88,7 @@
+
+
+
+-
++
+
+
+
+@@ -96,17 +97,17 @@
+
+
+
+-
++
+
+
+
+
+
+-
++
+
+
+
+-
++
+
+
+
+@@ -123,7 +124,7 @@
+
+
+
+-
++
+
+
+
diff --git a/packer/jambonz-mini/gcp/files/vimrc.local b/packer/jambonz-mini/gcp/files/vimrc.local
new file mode 100644
index 0000000..f0f40c1
--- /dev/null
+++ b/packer/jambonz-mini/gcp/files/vimrc.local
@@ -0,0 +1,5 @@
+source /usr/share/vim/vim80/defaults.vim
+let skip_defaults_vim = 1
+if has('mouse')
+ set mouse=r
+endif
diff --git a/packer/jambonz-mini/gcp-template.json b/packer/jambonz-mini/gcp/gcp-template.json
similarity index 100%
rename from packer/jambonz-mini/gcp-template.json
rename to packer/jambonz-mini/gcp/gcp-template.json
diff --git a/packer/jambonz-mini/gcp/scripts/install_apiban.sh b/packer/jambonz-mini/gcp/scripts/install_apiban.sh
new file mode 100644
index 0000000..e781a1d
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_apiban.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+cd /usr/local/src/
+git clone https://github.com/palner/apiban.git
+sudo mkdir /usr/local/bin/apiban && sudo chmod 0755 /usr/local/bin/apiban
+sudo cp -r /usr/local/src/apiban/clients/go/apiban-iptables-client /usr/local/bin/apiban && sudo chmod +x /usr/local/bin/apiban/apiban-iptables-client
+sudo cp /tmp/config.json /usr/local/bin/apiban/config.json
+sudo chmod 0644 /usr/local/bin/apiban/config.json
+sudo cp /tmp/apiban.logrotate /etc/logrotate.d/apiban-client
+sudo chmod 0644 /etc/logrotate.d/apiban-client
+echo "*/4 * * * * root cd /usr/local/bin/apiban && ./apiban-iptables-client >/dev/null 2>&1" | sudo tee -a /etc/crontab
diff --git a/packer/jambonz-mini/scripts/install_app.sh b/packer/jambonz-mini/gcp/scripts/install_app.sh
similarity index 100%
rename from packer/jambonz-mini/scripts/install_app.sh
rename to packer/jambonz-mini/gcp/scripts/install_app.sh
diff --git a/packer/jambonz-mini/gcp/scripts/install_chrony.sh b/packer/jambonz-mini/gcp/scripts/install_chrony.sh
new file mode 100644
index 0000000..caece01
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_chrony.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+sudo apt-get update
+sudo apt-get install -y chrony
+sudo systemctl enable chrony
diff --git a/packer/jambonz-mini/gcp/scripts/install_cloudwatch.sh b/packer/jambonz-mini/gcp/scripts/install_cloudwatch.sh
new file mode 100644
index 0000000..75b45e7
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_cloudwatch.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+if [ "$1" == "yes" ]; then
+
+#install cloudwatch
+sudo wget https://s3.amazonaws.com/amazoncloudwatch-agent/debian/amd64/latest/amazon-cloudwatch-agent.deb -O /home/admin/amazon-cloudwatch-agent.deb
+sudo dpkg -i -E /home/admin/amazon-cloudwatch-agent.deb
+sudo rm -rf /home/admin/amazon-cloudwatch-agent.deb
+
+# install config file for jambonz
+sudo cp -r /tmp/cloudwatch-config.json /opt/aws/amazon-cloudwatch-agent/bin/config.json
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/scripts/install_drachtio.sh b/packer/jambonz-mini/gcp/scripts/install_drachtio.sh
new file mode 100644
index 0000000..657605d
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_drachtio.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+VERSION=$1
+
+echo "drachtio version to install is ${VERSION}"
+
+chmod 0777 /usr/local/src
+cd /usr/local/src
+git clone https://github.com/drachtio/drachtio-server.git -b ${VERSION}
+cd drachtio-server
+git submodule update --init --recursive
+./autogen.sh && mkdir -p build && cd $_ && ../configure --enable-tcmalloc=yes CPPFLAGS='-DNDEBUG -g -O2' && make -j 4 && sudo make install
+
+if [ "$2" = "gcp" ]; then
+ echo "installing drachtio for gcp"
+ sudo mv /tmp/drachtio.gcp.service /etc/systemd/system/drachtio.service
+ sudo mv /tmp/drachtio-5070.gcp.service /etc/systemd/system/drachtio-5070.service
+else
+ echo "installing drachtio for aws"
+ sudo mv /tmp/drachtio.service /etc/systemd/system
+ sudo mv /tmp/drachtio-5070.service /etc/systemd/system
+fi
+
+sudo mv /tmp/drachtio.conf.xml /etc
+sudo chmod 644 /etc/drachtio.conf.xml
+sudo chmod 644 /etc/systemd/system/drachtio.service
+sudo systemctl enable drachtio
+sudo systemctl restart drachtio
+sudo systemctl status drachtio.service
+
+sudo mv /tmp/drachtio-5070.conf.xml /etc
+sudo chmod 644 /etc/drachtio-5070.conf.xml
+sudo chmod 644 /etc/systemd/system/drachtio-5070.service
+sudo systemctl enable drachtio-5070
+sudo systemctl restart drachtio-5070
diff --git a/packer/jambonz-mini/gcp/scripts/install_fail2ban.sh b/packer/jambonz-mini/gcp/scripts/install_fail2ban.sh
new file mode 100644
index 0000000..cdf4034
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_fail2ban.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+sudo cp /etc/fail2ban/jail.conf /etc/fail2ban/jail.local
+
+# comment out: overhead too high and apiban suffices
+#sudo bash -c "cat >> /etc/fail2ban/jail.local" << EOF
+
+
+#[drachtio-tcp]
+#maxretry = 1
+#bantime = 86400
+#enabled = true
+#filter = drachtio
+#port = 5060
+#protocol = tcp
+#logpath = /var/log/drachtio/drachtio.log
+#
+#[drachtio-udp]
+#maxretry = 1
+#bantime = 86400
+#enabled = true
+#filter = drachtio
+#port = 5060
+#protocol = udp
+#logpath = /var/log/drachtio/drachtio.log
+#
+#EOF
+
+#sudo cp /tmp/drachtio-fail2ban.conf /etc/fail2ban/filter.d/drachtio.conf
+#sudo chmod 0644 /etc/fail2ban/filter.d/drachtio.conf
+
+# add nginx jails and filters
+sudo cp /tmp/nginx-noscript.jail /etc/fail2ban/jail.d/nginx-noscript.conf
+sudo cp /tmp/nginx-noproxy.jail /etc/fail2ban/jail.d/nginx-noproxy.conf
+sudo cp /tmp/nginx-badbots.jail /etc/fail2ban/jail.d/nginx-badbots.conf
+
+sudo cp /tmp/nginx-noscript.filter /etc/fail2ban/filter.d/nginx-noscript.conf
+sudo cp /tmp/nginx-noproxy.filter /etc/fail2ban/filter.d/nginx-noproxy.conf
+sudo cp /tmp/nginx-badbots.filter /etc/fail2ban/filter.d/nginx-badbots.conf
+
+sudo chmod 0644 /etc/fail2ban/jail.d/*.conf
+sudo chmod 0644 /etc/fail2ban/filter.d/*.conf
+
+sudo systemctl enable fail2ban
+sudo systemctl restart fail2ban
diff --git a/packer/jambonz-mini/gcp/scripts/install_freeswitch.sh b/packer/jambonz-mini/gcp/scripts/install_freeswitch.sh
new file mode 100644
index 0000000..f2dfe82
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_freeswitch.sh
@@ -0,0 +1,188 @@
+#!/bin/bash
+FREESWITCH_VERSION=v1.10.5
+GRPC_VERSION=c66d2cc
+#GRPC_VERSION=v1.39.1
+#GOOGLE_API_VERSION=v1p1beta1-speech
+GOOGLE_API_VERSION=e9da6f8b469c52b83f900e820be30762e9e05c57
+AWS_SDK_VERSION=1.8.129
+LWS_VERSION=v3.2.3
+MODULES_VERSION=v0.6.15
+
+echo "freeswitch version to install is ${FREESWITCH_VERSION}"
+echo "drachtio modules version to install is ${MODULES_VERSION}"
+echo "GRPC version to install is ${GRPC_VERSION}"
+echo "GOOGLE_API_VERSION version to install is ${GOOGLE_API_VERSION}"
+echo "AWS_SDK_VERSION version to install is ${AWS_SDK_VERSION}"
+echo "LWS_VERSION version to install is ${LWS_VERSION}"
+
+export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
+
+cd /tmp
+tar xvfz SpeechSDK-Linux-1.26.0.tar.gz
+cd SpeechSDK-Linux-1.26.0
+sudo cp -r include /usr/local/include/MicrosoftSpeechSDK
+sudo cp -r lib/ /usr/local/lib/MicrosoftSpeechSDK
+if [ "$ARCH" == "arm64" ]; then
+ echo installing Microsoft arm64 libs...
+ sudo cp /usr/local/lib/MicrosoftSpeechSDK/arm64/libMicrosoft.*.so /usr/local/lib/
+ echo done
+fi
+if [ "$ARCH" == "amd64" ]; then
+ echo installing Microsoft x64 libs...
+ sudo cp /usr/local/lib/MicrosoftSpeechSDK/x64/libMicrosoft.*.so /usr/local/lib/
+ echo done
+fi
+
+cd /usr/local/src
+echo remove SpeechSDK-Linux-1.26.0
+sudo rm -Rf /tmp/SpeechSDK-Linux-1.26.0.tar.gz /tmp/SpeechSDK-Linux-1.26.0
+echo done
+
+echo config git
+git config --global pull.rebase true
+echo done
+git clone https://github.com/signalwire/freeswitch.git -b ${FREESWITCH_VERSION}
+git clone https://github.com/warmcat/libwebsockets.git -b ${LWS_VERSION}
+git clone https://github.com/drachtio/drachtio-freeswitch-modules.git -b ${MODULES_VERSION}
+git clone https://github.com/grpc/grpc -b master
+cd grpc && git checkout ${GRPC_VERSION} && cd ..
+
+cd freeswitch/libs
+git clone https://github.com/drachtio/nuance-asr-grpc-api.git -b main
+git clone https://github.com/drachtio/riva-asr-grpc-api.git -b main
+git clone https://github.com/drachtio/soniox-asr-grpc-api.git -b main
+git clone https://github.com/freeswitch/spandsp.git -b master
+git clone https://github.com/freeswitch/sofia-sip.git -b master
+git clone https://github.com/dpirch/libfvad.git
+git clone https://github.com/aws/aws-sdk-cpp.git -b ${AWS_SDK_VERSION}
+git clone https://github.com/googleapis/googleapis -b master
+cd googleapis && git checkout ${GOOGLE_API_VERSION} && cd ..
+git clone https://github.com/awslabs/aws-c-common.git
+
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_audio_fork /usr/local/src/freeswitch/src/mod/applications/mod_audio_fork
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_aws_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_aws_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_azure_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_azure_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_aws_lex /usr/local/src/freeswitch/src/mod/applications/mod_aws_lex
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_deepgram_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_deepgram_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_google_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_google_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_ibm_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_ibm_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_nuance_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_nuance_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_nvidia_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_nvidia_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_soniox_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_soniox_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_jambonz_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_jambonz_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_google_tts /usr/local/src/freeswitch/src/mod/applications/mod_google_tts
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_dialogflow /usr/local/src/freeswitch/src/mod/applications/mod_dialogflow
+
+sudo sed -i -r -e 's/(.*AM_CFLAGS\))/\1 -g -O0/g' /usr/local/src/freeswitch/src/mod/applications/mod_audio_fork/Makefile.am
+sudo sed -i -r -e 's/(.*-std=c++11)/\1 -g -O0/g' /usr/local/src/freeswitch/src/mod/applications/mod_audio_fork/Makefile.am
+
+# copy Makefiles and patches into place
+cp /tmp/configure.ac.extra /usr/local/src/freeswitch/configure.ac
+cp /tmp/Makefile.am.extra /usr/local/src/freeswitch/Makefile.am
+cp /tmp/modules.conf.in.extra /usr/local/src/freeswitch/build/modules.conf.in
+cp /tmp/modules.conf.vanilla.xml.extra /usr/local/src/freeswitch/conf/vanilla/autoload_configs/modules.conf.xml
+cp /tmp/avmd.conf.xml /usr/local/src/freeswitch/conf/vanilla/autoload_configs/avmd_conf.xml
+cp /tmp/switch_rtp.c.patch /usr/local/src/freeswitch/src
+cp /tmp/switch_core_media.c.patch /usr/local/src/freeswitch/src
+cp /tmp/mod_avmd.c.patch /usr/local/src/freeswitch/src/mod/applications/mod_avmd
+cp /tmp/mod_httapi.c.patch /usr/local/src/freeswitch/src/mod/applications/mod_httapi
+
+# patch freeswitch
+cd /usr/local/src/freeswitch/src
+patch < switch_rtp.c.patch
+patch < switch_core_media.c.patch
+cd /usr/local/src/freeswitch/src/mod/applications/mod_avmd
+patch < mod_avmd.c.patch
+cd /usr/local/src/freeswitch/src/mod/applications/mod_httapi
+patch < mod_httapi.c.patch
+
+# build libwebsockets
+cd /usr/local/src/libwebsockets
+sudo mkdir -p build && cd build && sudo cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo && sudo make && sudo make install
+
+# build libfvad
+cd /usr/local/src/freeswitch/libs/libfvad
+sudo autoreconf -i && sudo ./configure && sudo make -j 4 && sudo make install
+
+# build spandsp
+cd /usr/local/src/freeswitch/libs/spandsp
+./bootstrap.sh && ./configure && make -j 4 && sudo make install
+
+# build sofia
+cd /usr/local/src/freeswitch/libs/sofia-sip
+./bootstrap.sh && ./configure && make -j 4 && sudo make install
+
+# build aws-c-common
+cd /usr/local/src/freeswitch/libs/aws-c-common
+mkdir -p build && cd build
+cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_SHARED_LIBS=OFF -DCMAKE_CXX_FLAGS="-Wno-unused-parameter"
+make -j 4 && sudo make install
+
+# build aws-sdk-cpp
+cd /usr/local/src/freeswitch/libs/aws-sdk-cpp
+mkdir -p build && cd build
+cmake .. -DBUILD_ONLY="lexv2-runtime;transcribestreaming" -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_SHARED_LIBS=OFF -DCMAKE_CXX_FLAGS="-Wno-unused-parameter"
+make -j 4 && sudo make install
+
+# build grpc
+cd /usr/local/src/grpc
+git submodule update --init --recursive
+mkdir -p cmake/build
+cd cmake/build
+cmake -DBUILD_SHARED_LIBS=ON -DgRPC_SSL_PROVIDER=package -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=RelWithDebInfo ../..
+make -j 4
+sudo make install
+
+# build googleapis
+cd /usr/local/src/freeswitch/libs/googleapis
+echo "Ref: https://github.com/GoogleCloudPlatform/cpp-samples/issues/113"
+sed -i 's/\$fields/fields/' google/maps/routes/v1/route_service.proto
+sed -i 's/\$fields/fields/' google/maps/routes/v1alpha/route_service.proto
+LANGUAGE=cpp make -j 4
+
+# build nuance protobufs
+echo "building protobuf stubs for Nuance asr"
+cd /usr/local/src/freeswitch/libs/nuance-asr-grpc-api
+LANGUAGE=cpp make
+
+# build nvidia protobufs
+echo "building protobuf stubs for nvidia riva asr"
+cd /usr/local/src/freeswitch/libs/riva-asr-grpc-api
+LANGUAGE=cpp make
+
+# build soniox protobufs
+echo "building protobuf stubs for sonioxasr"
+cd /usr/local/src/freeswitch/libs/soniox-asr-grpc-api
+LANGUAGE=cpp make
+
+# build freeswitch
+echo "building freeswitch"
+cd /usr/local/src/freeswitch
+sudo ./bootstrap.sh -j
+sudo ./configure --enable-tcmalloc=yes --with-lws=yes --with-extra=yes
+sudo make -j 4
+sudo make install
+sudo make cd-sounds-install cd-moh-install
+sudo cp /tmp/acl.conf.xml /usr/local/freeswitch/conf/autoload_configs
+sudo cp /tmp/event_socket.conf.xml /usr/local/freeswitch/conf/autoload_configs
+sudo cp /tmp/switch.conf.xml /usr/local/freeswitch/conf/autoload_configs
+sudo cp /tmp/conference.conf.xml /usr/local/freeswitch/conf/autoload_configs
+sudo rm -Rf /usr/local/freeswitch/conf/dialplan/*
+sudo rm -Rf /usr/local/freeswitch/conf/sip_profiles/*
+sudo cp /tmp/mrf_dialplan.xml /usr/local/freeswitch/conf/dialplan
+sudo cp /tmp/mrf_sip_profile.xml /usr/local/freeswitch/conf/sip_profiles
+sudo cp /usr/local/src/freeswitch/conf/vanilla/autoload_configs/modules.conf.xml /usr/local/freeswitch/conf/autoload_configs
+sudo cp /tmp/freeswitch.service /etc/systemd/system
+sudo chown root:root -R /usr/local/freeswitch
+sudo chmod 644 /etc/systemd/system/freeswitch.service
+sudo sed -i -e 's/global_codec_prefs=OPUS,G722,PCMU,PCMA,H264,VP8/global_codec_prefs=PCMU,PCMA,OPUS,G722/g' /usr/local/freeswitch/conf/vars.xml
+sudo sed -i -e 's/outbound_codec_prefs=OPUS,G722,PCMU,PCMA,H264,VP8/outbound_codec_prefs=PCMU,PCMA,OPUS,G722/g' /usr/local/freeswitch/conf/vars.xml
+sudo systemctl enable freeswitch
+sudo cp /tmp/freeswitch_log_rotation /etc/cron.daily/freeswitch_log_rotation
+sudo chown root:root /etc/cron.daily/freeswitch_log_rotation
+sudo chmod a+x /etc/cron.daily/freeswitch_log_rotation
+
+echo "downloading soniox root verification certificate"
+cd /usr/local/freeswitch/certs
+wget https://raw.githubusercontent.com/grpc/grpc/master/etc/roots.pem
+
diff --git a/packer/jambonz-mini/gcp/scripts/install_grafana.sh b/packer/jambonz-mini/gcp/scripts/install_grafana.sh
new file mode 100644
index 0000000..0dde8f6
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_grafana.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+if [ "$1" = "yes" ]; then
+
+curl -sL https://packages.grafana.com/gpg.key | sudo apt-key add -
+echo "deb https://packages.grafana.com/oss/deb stable main" | sudo tee /etc/apt/sources.list.d/grafana.list
+sudo apt-get update
+sudo apt-get install -y grafana
+
+# move to port 3010
+sudo sed -i -e "s/;http_port = 3000/http_port = 3010/g" /etc/grafana/grafana.ini
+
+sudo mkdir /var/lib/grafana/dashboards
+sudo mv /tmp/grafana-dashboard-default.yaml /etc/grafana/provisioning/dashboards/default.yaml
+sudo mv /tmp/grafana-datasource.yml /etc/grafana/provisioning/datasources/datasource.yml
+
+sudo mv /tmp/grafana-dashboard-heplify.json /var/lib/grafana/dashboards
+sudo mv /tmp/grafana-dashboard-jambonz.json /var/lib/grafana/dashboards
+sudo mv /tmp/grafana-dashboard-servers.json /var/lib/grafana/dashboards
+
+sudo chown -R grafana:grafana /var/lib/grafana/dashboards
+sudo chown -R grafana:grafana /etc/grafana/provisioning/dashboards
+
+sudo systemctl enable grafana-server
+sudo systemctl start grafana-server
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/scripts/install_homer.sh b/packer/jambonz-mini/gcp/scripts/install_homer.sh
new file mode 100644
index 0000000..14717c6
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_homer.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+if [ "$1" == "yes" ]; then
+
+DB_USER=$2
+DB_PASS=$3
+
+curl -s https://packagecloud.io/install/repositories/qxip/sipcapture/script.deb.sh | sudo bash
+sudo apt-get install -y homer-app heplify-server
+
+sudo cp /usr/local/homer/etc/webapp_config.json.example /usr/local/homer/etc/webapp_config.json
+sudo sed -i -e "s/homer_user/$DB_USER/g" /usr/local/homer/etc/webapp_config.json
+sudo sed -i -e "s/homer_password/$DB_PASS/g" /usr/local/homer/etc/webapp_config.json
+sudo sed -i -e "s/localhost/127.0.0.1/g" /usr/local/homer/etc/webapp_config.json
+sudo homer-app -create-table-db-config
+sudo homer-app -populate-table-db-config
+sudo sed -i -e "s/DBUser\s*=\s*\"postgres\"/DBUser = \"$DB_USER\"/g" /etc/heplify-server.toml
+sudo sed -i -e "s/DBPass\s*=\s*\"\"/DBPass = \"$DB_PASS\"/g" /etc/heplify-server.toml
+sudo sed -i -e "s/PromAddr\s*=\s*\".*\"/PromAddr = \"0.0.0.0:9098\"/g" /etc/heplify-server.toml
+sudo sed -i -e "s/HEPWSAddr\s*=\s*\".*\"/HEPWSAddr = \"0.0.0.0:3050\"/g" /etc/heplify-server.toml
+sudo sed -i -e "s/AlegIDs\s*=\s*\[\]/AlegIDs = \[\"X-CID\"]/g" /etc/heplify-server.toml
+sudo sed -i -e "s/CustomHeader\s*=\s*\[\]/CustomHeader = \[\"X-Application-Sid\", \"X-Originating-Carrier\", \"X-MS-Teams-Tenant-FQDN\", \"X-Authenticated-User\"]/g" /etc/heplify-server.toml
+
+sudo systemctl enable homer-app
+sudo systemctl restart homer-app
+sudo systemctl status homer-app
+
+sudo systemctl enable heplify-server
+sudo systemctl restart heplify-server
+sudo systemctl status heplify-server
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/scripts/install_influxdb.sh b/packer/jambonz-mini/gcp/scripts/install_influxdb.sh
new file mode 100644
index 0000000..c82d28d
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_influxdb.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+if [ "$1" == "yes" ]; then
+
+sudo apt-get install -y apt-transport-https
+curl -sL https://repos.influxdata.com/influxdb.key | sudo apt-key add -
+echo "deb https://repos.influxdata.com/debian buster stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+sudo apt-get update
+sudo apt-get install -y influxdb
+sudo chmod a+x /usr/lib/influxdb/scripts/influxd-systemd-start.sh
+sudo systemctl enable influxdb
+sudo systemctl start influxdb
+
+sudo systemctl status influxdb.service
+sudo journalctl -xe
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/scripts/install_jaeger.sh b/packer/jambonz-mini/gcp/scripts/install_jaeger.sh
new file mode 100644
index 0000000..5af8e1c
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_jaeger.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+if [ "$1" == "yes" ]; then
+
+cd /tmp
+
+echo "installing jaeger"
+wget https://github.com/jaegertracing/jaeger/releases/download/v1.33.0/jaeger-1.33.0-linux-amd64.tar.gz
+sudo tar xvfz jaeger-1.33.0-linux-amd64.tar.gz
+sudo cp jaeger-1.33.0-linux-amd64/jaeger-all-in-one /usr/local/bin
+
+sudo cp jaeger.service /etc/systemd/system
+sudo chmod 644 /etc/systemd/system/jaeger.service
+sudo systemctl enable jaeger
+sudo systemctl start jaeger
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/scripts/install_mysql.sh b/packer/jambonz-mini/gcp/scripts/install_mysql.sh
new file mode 100644
index 0000000..253857e
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_mysql.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Install a MySQL server, create the jambones database and user, and load
+# the jambonz schema plus seed data.
+# $1: database user to create   $2: password for that user
+DB_USER=$1
+DB_PASS=$2
+
+sudo apt install -y dirmngr
+# mysql-server.key is staged in /tmp by the packer file provisioner.
+sudo apt-key add - < /tmp/mysql-server.key
+echo "deb http://repo.mysql.com/apt/debian $(lsb_release -sc) mysql-8.0" | sudo tee /etc/apt/sources.list.d/mysql80.list
+sudo apt update
+# Pre-seed the installer so it runs unattended with a known root password
+# (the cloudformation/startup script later rotates it to a random value).
+sudo debconf-set-selections <<< "mysql-community-server mysql-community-server/root-pass password JambonzR0ck\$"
+sudo debconf-set-selections <<< "mysql-community-server mysql-community-server/re-root-pass password JambonzR0ck\$"
+sudo debconf-set-selections <<< "mysql-community-server mysql-server/default-auth-override select Use Legacy Authentication Method (Retain MySQL 5.x Compatibility)"
+# NOTE(review): the debconf selections above target mysql-community-server,
+# but default-mysql-server is Debian's metapackage (MariaDB on bullseye) --
+# confirm whether this should install mysql-server from the repo added above.
+sudo DEBIAN_FRONTEND=noninteractive apt install -y default-mysql-server
+echo "starting mysql"
+sudo systemctl start mysql
+echo "creating database"
+
+# create the database and the user
+mysql -h localhost -u root -pJambonzR0ck\$ << END
+create database jambones;
+SET old_passwords=0;
+create user $DB_USER@'%' IDENTIFIED BY '$DB_PASS';
+grant all on jambones.* to $DB_USER@'%' with grant option;
+grant create user on *.* to $DB_USER@'%' with grant option;
+flush privileges;
+END
+
+# create the schema
+echo "creating schema"
+mysql -h localhost -u $DB_USER -p$DB_PASS -D jambones < /home/admin/apps/jambonz-api-server/db/jambones-sql.sql
+echo "seeding initial data"
+mysql -h localhost -u $DB_USER -p$DB_PASS -D jambones < /home/admin/apps/jambonz-api-server/db/seed-production-database-open-source.sql
+
diff --git a/packer/jambonz-mini/gcp/scripts/install_nginx.sh b/packer/jambonz-mini/gcp/scripts/install_nginx.sh
new file mode 100644
index 0000000..4368b3e
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_nginx.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Install nginx plus apache2-utils (for htpasswd).
+
+echo "installing nginx"
+
+sudo apt-get install -y nginx
+
+echo "installing apache utils for htpasswd"
+sudo apt-get install -y apache2-utils
+
+# Replace the default site config with the one staged in /tmp by packer.
+cd /etc/nginx/sites-available
+sudo mv /tmp/nginx.default default
+
+sudo systemctl enable nginx
+sudo systemctl restart nginx
+
+# Log the service state into the packer build output (non-fatal).
+sudo systemctl status nginx
diff --git a/packer/jambonz-mini/gcp/scripts/install_node_red.sh b/packer/jambonz-mini/gcp/scripts/install_node_red.sh
new file mode 100644
index 0000000..2433ef1
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_node_red.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Optionally build node-red from source and register the jambonz ecosystem
+# file with pm2 so everything starts on boot.
+# $1: "yes" to install, anything else to skip.
+
+if [ "$1" == "yes" ]; then
+
+#install node-red
+# NOTE(review): this builds relative to the provisioner's cwd -- presumably
+# the admin home dir, making "apps" /home/admin/apps; confirm.
+mkdir -p apps && cd apps || exit 1
+git clone https://github.com/node-red/node-red.git
+cd node-red || exit 1
+sudo npm install --unsafe-perm
+grunt build
+
+sudo mv /tmp/ecosystem.config.js /home/admin/apps
+sudo chown -R admin:admin /home/admin/apps
+
+# Start everything under pm2 as the admin user and persist across reboots.
+sudo -u admin bash -c "pm2 start /home/admin/apps/ecosystem.config.js"
+sudo env PATH=$PATH:/usr/bin /usr/lib/node_modules/pm2/bin/pm2 startup systemd -u admin --hp /home/admin
+sudo -u admin bash -c "pm2 save"
+sudo systemctl enable pm2-admin.service
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/scripts/install_nodejs.sh b/packer/jambonz-mini/gcp/scripts/install_nodejs.sh
new file mode 100644
index 0000000..d8de438
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_nodejs.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# Install Node.js 18.x from nodesource and the latest npm.
+curl -sL https://deb.nodesource.com/setup_18.x | sudo bash - && sudo apt-get install -y nodejs
+sudo npm install -g npm@latest
+node -v
+npm -v
+# Hand root's npm cache to uid 1000 so later non-root installs can reuse it.
+# NOTE(review): "chmod -R a+wx /root" makes root's home world-writable --
+# overly broad; a+x on /root plus the chown below looks sufficient.
+sudo chmod -R a+wx /root
+sudo chown -R 1000:1000 /root/.npm
diff --git a/packer/jambonz-mini/gcp/scripts/install_os_tuning.sh b/packer/jambonz-mini/gcp/scripts/install_os_tuning.sh
new file mode 100755
index 0000000..8957d07
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_os_tuning.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# OS tuning: raise open-file limits, adjust kernel networking/VM settings,
+# and turn off apt auto-upgrades.
+
+# Insert nofile limits for all users and for root ahead of the
+# "# End of file" marker in limits.conf.
+for rule in '* hard nofile 65535' '* soft nofile 65535' 'root hard nofile 65535' 'root soft nofile 65535'; do
+  sudo sed -i "/# End of file/i $rule" /etc/security/limits.conf
+done
+# Raise the systemd default fd limit to match.
+sudo sed -i s/#DefaultLimitNOFILE=/DefaultLimitNOFILE=65535/g /etc/systemd/system.conf
+sudo sed -i s/#DefaultLimitNOFILE=/DefaultLimitNOFILE=65535/g /etc/systemd/user.conf
+
+# Kernel tuning: larger UDP receive buffers, avoid swapping, flush dirty
+# pages more aggressively.
+sudo bash -c 'cat >> /etc/sysctl.conf << EOT
+net.core.rmem_max=26214400
+net.core.rmem_default=26214400
+vm.swappiness=0
+vm.dirty_expire_centisecs=200
+vm.dirty_writeback_centisecs=100
+EOT'
+
+# 20auto-upgrades is staged in /tmp by the packer file provisioner.
+sudo cp /tmp/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/scripts/install_postgresql.sh b/packer/jambonz-mini/gcp/scripts/install_postgresql.sh
new file mode 100644
index 0000000..8b96d3f
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_postgresql.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Optionally install PostgreSQL 12 and create the homer databases and role.
+# $1: "yes" to install   $2: role name   $3: role password
+
+if [ "$1" == "yes" ]; then
+
+DB_USER=$2
+DB_PASS=$3
+
+# Add the PGDG apt repo for the current release and install the server.
+wget -q https://www.postgresql.org/media/keys/ACCC4CF8.asc -O- | sudo apt-key add -
+sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/postgresql.list'
+sudo apt-get update
+sudo apt-get install -y postgresql-12
+sudo systemctl daemon-reload
+sudo systemctl enable postgresql
+sudo systemctl restart postgresql
+
+# Databases used by homer, administered by $DB_USER.
+sudo -u postgres psql -c "CREATE DATABASE homer_config;"
+sudo -u postgres psql -c "CREATE DATABASE homer_data;"
+sudo -u postgres psql -c "CREATE ROLE ${DB_USER} WITH SUPERUSER LOGIN PASSWORD '$DB_PASS';"
+sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE homer_config to ${DB_USER};"
+sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE homer_data to ${DB_USER};"
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/gcp/scripts/install_redis.sh b/packer/jambonz-mini/gcp/scripts/install_redis.sh
new file mode 100644
index 0000000..7a36e91
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_redis.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# Install redis and make sure it is running and enabled at boot.
+sudo apt-get install -y redis-server
+sudo systemctl enable redis-server
+sudo systemctl restart redis-server
diff --git a/packer/jambonz-mini/gcp/scripts/install_rtpengine.sh b/packer/jambonz-mini/gcp/scripts/install_rtpengine.sh
new file mode 100644
index 0000000..1711270
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_rtpengine.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+# Build and install rtpengine from source, including the iptables
+# kernel-forwarding module, and start it plus the recording daemon.
+# $1: rtpengine git tag/branch to build   $2: cloud provider ("gcp" or other)
+VERSION=$1
+
+echo "rtpengine version to install is ${VERSION}, cloud provider is $2"
+
+# bcg729 provides G.729 codec support for rtpengine transcoding.
+cd /usr/local/src
+git clone https://github.com/BelledonneCommunications/bcg729.git
+cd bcg729
+cmake . -DCMAKE_INSTALL_PREFIX=/usr && make && sudo make install chdir=/usr/local/src/bcg729
+cd /usr/local/src
+
+git clone https://github.com/warmcat/libwebsockets.git -b v3.2.3
+cd /usr/local/src/libwebsockets
+sudo mkdir -p build && cd build && sudo cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo && sudo make && sudo make install
+
+cd /usr/local/src
+git clone https://github.com/sipwise/rtpengine.git -b ${VERSION}
+cd rtpengine
+# NOTE(review): "with-kernel" looks malformed next to with_transcoding=yes /
+# with_iptables_option=yes -- confirm the intended make variable or target.
+make -j 4 with_transcoding=yes with_iptables_option=yes with-kernel
+
+# copy iptables extension into place
+cp ./iptables-extension/libxt_RTPENGINE.so `pkg-config xtables --variable=xtlibdir`
+
+# install kernel module (use -p so a rebuild on the same image is idempotent)
+mkdir -p /lib/modules/`uname -r`/updates/
+cp ./kernel-module/xt_RTPENGINE.ko /lib/modules/`uname -r`/updates
+depmod -a
+modprobe xt_RTPENGINE
+cat << EOF >> /etc/modules
+xt_RTPENGINE
+EOF
+
+# Create kernel forwarding table 42 and steer the RTP port range through it.
+echo 'add 42' > /proc/rtpengine/control
+iptables -I INPUT -p udp --dport 40000:60000 -j RTPENGINE --id 42
+
+# Pick the provider-specific service unit (staged in /tmp by packer).
+if [ "$2" = "gcp" ]; then
+  echo "installing rtpengine for gcp"
+  sudo mv /tmp/rtpengine.gcp.service /etc/systemd/system/rtpengine.service
+else
+  echo "installing rtpengine with default service unit"
+  sudo mv /tmp/rtpengine.service /etc/systemd/system/rtpengine.service
+fi
+
+cp /usr/local/src/rtpengine/daemon/rtpengine /usr/local/bin
+cp /usr/local/src/rtpengine/recording-daemon/rtpengine-recording /usr/local/bin/
+sudo mv /tmp/rtpengine-recording.service /etc/systemd/system
+sudo mv /tmp/rtpengine-recording.ini /etc/rtpengine-recording.ini
+sudo chmod 644 /etc/systemd/system/rtpengine.service
+sudo chmod 644 /etc/systemd/system/rtpengine-recording.service
+sudo chmod 644 /etc/rtpengine-recording.ini
+mkdir -p /var/spool/recording
+mkdir -p /recording
+sudo systemctl enable rtpengine
+sudo systemctl enable rtpengine-recording
+sudo systemctl start rtpengine
+sudo systemctl start rtpengine-recording
diff --git a/packer/jambonz-mini/gcp/scripts/install_telegraf.sh b/packer/jambonz-mini/gcp/scripts/install_telegraf.sh
new file mode 100644
index 0000000..48e60ea
--- /dev/null
+++ b/packer/jambonz-mini/gcp/scripts/install_telegraf.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Optionally install telegraf and point its output at the influxdb host.
+# $1: "yes" to install   $2: influxdb host/IP
+
+if [ "$1" == "yes" ]; then
+
+INFLUXDB_IP=$2
+
+# Add the influxdata apt repo using the signed-by keyring approach
+# (apt-key is deprecated).
+cd /tmp
+wget -q https://repos.influxdata.com/influxdata-archive_compat.key
+gpg --with-fingerprint --show-keys ./influxdata-archive_compat.key
+cat influxdata-archive_compat.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null
+echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
+
+sudo apt-get update
+sudo apt-get install -y telegraf
+
+# Install the staged config, rewriting the output URL to the real host.
+sudo cp /tmp/telegraf.conf /etc/telegraf/telegraf.conf
+sudo sed -i -e "s/influxdb:8086/$INFLUXDB_IP:8086/g" /etc/telegraf/telegraf.conf
+
+sudo systemctl enable telegraf
+sudo systemctl start telegraf
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/README.md b/packer/jambonz-mini/proxmox/README.md
new file mode 100644
index 0000000..70c5d68
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/README.md
@@ -0,0 +1,33 @@
+# packer build of jambonz-mini VM template for Proxmox
+
+A [packer](https://www.packer.io/) template to build a Proxmox VM template containing everything needed to run jambonz on a single VM instance. The base linux distro is Debian 11 (bullseye).
+
+Once the VM template has been created using this template, the associated terraform template should be used to deploy the final jambonz-mini server.
+
+## Prerequisites
+In order to run this packer script you must first create a VM template on your Packer node that has a basic Debian 11 install meeting the following requirements:
+- an 'admin' user has been created that has sudo privileges
+- the 'admin' user should have your public ssh key installed to allow passwordless access
+- the VM template should have 4 CPU cores
+
+## Installing
+
+Assuming that you have created a variables.json file to hold your variable values, you would simply do this:
+```
+$ packer build -color=false -var-file=variables.json template.json
+```
+
+### variables
+There are many variables that can be specified either on the `packer build` command line, or in a separate variables.json file.
+
+- `proxmox_url`: the url of the proxmox GUI api server (e.g. https://<proxmox-host>:8006/api2/json)
+- `proxmox_user`: user to log into proxmox GUI (e.g. root@pam)
+- `proxmox_password`: password for proxmox GUI user
+- `proxmox_node`: name of the proxmox node
+- `proxmox_source_vm_private_key_file`: path to private ssh key on local machine, used to ssh to source vm without a password
+- `proxmox_clone_vm`: name of the VM template to clone and build from
+- `proxmox_vm_id`: vm id to assign to the VM build server
+- `proxmox_bridge`: name of the proxmox bridge to attach the VM build server to
+- `proxmox_ip`: IP address to assign to the VM build server
+- `proxmox_gateway`: gateway for the VM build server
+
diff --git a/packer/jambonz-mini/proxmox/files/20auto-upgrades b/packer/jambonz-mini/proxmox/files/20auto-upgrades
new file mode 100644
index 0000000..f066dcb
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/20auto-upgrades
@@ -0,0 +1,2 @@
+APT::Periodic::Update-Package-Lists "0";
+APT::Periodic::Unattended-Upgrade "0";
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/Makefile.am.extra b/packer/jambonz-mini/proxmox/files/Makefile.am.extra
new file mode 100644
index 0000000..7ba2f2e
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/Makefile.am.extra
@@ -0,0 +1,1056 @@
+EXTRA_DIST =
+SUBDIRS = . src build tests/unit
+AUTOMAKE_OPTIONS = foreign subdir-objects
+NAME = freeswitch
+
+if SYSTEM_APR
+AM_LIBAPR_CFLAGS := $(shell apr-1-config --cflags)
+AM_LIBAPR_CPPFLAGS := $(shell apr-1-config --cppflags --includes)
+AM_LIBAPR_LDFLAGS := $(shell apr-1-config --ldflags)
+AM_LIBAPR_LIBS := $(shell apr-1-config \--libs)
+AM_LIBAPR_LINKLIBTOOL := $(shell apr-1-config \--link-libtool)
+else
+AM_LIBAPR_CFLAGS := $(shell ./libs/apr/apr-1-config --cflags)
+AM_LIBAPR_CPPFLAGS := $(shell ./libs/apr/apr-1-config --cppflags --includes)
+AM_LIBAPR_LDFLAGS := $(shell ./libs/apr/apr-1-config --ldflags)
+AM_LIBAPR_LIBS := $(subst $(switch_builddir)/,,$(shell ./libs/apr/apr-1-config \--libs))
+endif
+if SYSTEM_APRUTIL
+AM_LIBAPU_CPPFLAGS := $(shell apu-1-config --includes)
+AM_LIBAPU_LDFLAGS := $(shell apu-1-config --ldflags)
+AM_LIBAPU_LIBS := $(shell apu-1-config \--libs)
+AM_LIBAPU_LINKLIBTOOL := $(shell apu-1-config \--link-libtool)
+else
+AM_LIBAPU_CPPFLAGS := $(shell ./libs/apr-util/apu-1-config --includes)
+AM_LIBAPU_LDFLAGS := $(shell ./libs/apr-util/apu-1-config --ldflags)
+AM_LIBAPU_LIBS := $(subst $(switch_builddir)/,,$(shell ./libs/apr-util/apu-1-config \--libs))
+endif
+
+
+AM_CFLAGS = $(SWITCH_AM_CFLAGS) $(SWITCH_ANSI_CFLAGS)
+AM_CPPFLAGS =
+AM_CPPFLAGS += -I$(switch_srcdir)/libs/libvpx
+AM_CPPFLAGS += $(SWITCH_AM_CXXFLAGS)
+AM_LDFLAGS = $(SWITCH_AM_LDFLAGS) $(AM_LIBAPR_LDFLAGS) $(AM_LIBAPU_LDFLAGS)
+
+DEFAULT_SOUNDS=en-us-callie-8000
+MY_DEFAULT_ARGS= --build=$(build) --host=$(host) --target=$(target) --prefix="$(prefix)" --exec_prefix="$(exec_prefix)" --libdir="$(libdir)"
+
+.INTERMEDIATE: -ldl -liconv -lpthread
+
+.DEFAULT: $(switch_builddir)/modules.conf src/mod/modules.inc
+ @target=`echo $@ | sed -e 's|^.*-||'`; \
+ target_prefix=`echo $@ | sed -e 's|-.*$$||'`; \
+ sound_perfix=`echo $@ | sed -e 's|-.*||'`; \
+ moh_version=`cat $(switch_srcdir)/build/moh_version.txt`;\
+ full_sound_dir=`echo $@ | sed -e 's|^sounds||' | sed -e 's|^-||' | sed -e 's|-install$$||'`; \
+ test ! -z $$full_sound_dir || full_sound_dir=`echo $(DEFAULT_SOUNDS)`; \
+ base_sound_dir=`echo $$full_sound_dir | sed -e 's|-[^-]*000$$||' ` ;\
+ sounds_version=`grep $$base_sound_dir $(switch_srcdir)/build/sounds_version.txt | cut -d ' ' -f2`;\
+ soundfile=`echo freeswitch-sounds-$$full_sound_dir-$$moh_version.tar.gz`; \
+ echo $$full_sound_dir | grep music >/dev/null || soundfile=`echo freeswitch-sounds-$$full_sound_dir-$$sounds_version.tar.gz`; \
+ args="$@"; if test -z "$$args" || test "$${args#-l*}" = "$$args"; then \
+ if test "$$target_prefix" = "sounds"; then \
+ if test "$$target" = "install"; then \
+ $(GETSOUNDS) $$soundfile $(DESTDIR)$(soundsdir)/;\
+ else \
+ $(GETSOUNDS) $$soundfile ; \
+ fi; \
+ else \
+ if test "$$target" = "install"; then \
+ $(MAKE) $(AM_MAKEFLAGS) core_install && cd src/mod && $(MAKE) $(AM_MAKEFLAGS) $@ ; \
+ else \
+ if test "$$target" = "clean"; then \
+ cd src/mod && $(MAKE) $(AM_MAKEFLAGS) $@ ;\
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) core && cd src/mod && $(MAKE) $(AM_MAKEFLAGS) $@ ;\
+ fi; \
+ fi; \
+ fi; fi
+
+sounds: sounds-en-us-callie-8000
+sounds-install: sounds-en-us-callie-8000-install
+sounds-allison: sounds-en-us-allison-8000
+sounds-allison-install: sounds-en-us-allison-8000-install
+sounds-ru: sounds-ru-RU-elena-8000
+sounds-ru-install: sounds-ru-RU-elena-8000-install
+sounds-fr: sounds-fr-ca-june-8000
+sounds-fr-install: sounds-fr-ca-june-8000-install
+moh: sounds-music-8000
+moh-install: sounds-music-8000-install
+
+hd-sounds: sounds sounds-en-us-callie-16000
+hd-sounds-install: sounds-install sounds-en-us-callie-16000-install
+hd-sounds-allison: sounds-allison sounds-en-us-allison-16000
+hd-sounds-allison-install: sounds-allison-install sounds-en-us-allison-16000-install
+hd-sounds-ru: sounds-ru sounds-ru-RU-elena-16000
+hd-sounds-ru-install: sounds-ru-install sounds-ru-RU-elena-16000-install
+hd-sounds-fr: sounds-fr-ca-june-16000
+hd-sounds-fr-install: sounds-fr-ca-june-16000-install
+hd-moh: moh sounds-music-16000
+hd-moh-install: moh-install sounds-music-16000-install
+
+uhd-sounds: hd-sounds sounds-en-us-callie-32000
+uhd-sounds-install: hd-sounds-install sounds-en-us-callie-32000-install
+uhd-sounds-allison: hd-sounds-allison sounds-en-us-allison-32000
+uhd-sounds-allison-install: hd-sounds-allison-install sounds-en-us-allison-32000-install
+uhd-sounds-ru: hd-sounds-ru sounds-ru-RU-elena-32000
+uhd-sounds-ru-install: hd-sounds-ru-install sounds-ru-RU-elena-32000-install
+uhd-sounds-fr: sounds-fr-ca-june-32000
+uhd-sounds-fr-install: sounds-fr-ca-june-32000-install
+uhd-moh: hd-moh sounds-music-32000
+uhd-moh-install: hd-moh-install sounds-music-32000-install
+
+cd-sounds: uhd-sounds sounds-en-us-callie-48000
+cd-sounds-install: uhd-sounds-install sounds-en-us-callie-48000-install
+cd-sounds-allison: uhd-sounds-allison sounds-en-us-allison-48000
+cd-sounds-allison-install: uhd-sounds-allison-install sounds-en-us-allison-48000-install
+cd-sounds-ru: uhd-sounds-ru sounds-ru-RU-elena-48000
+cd-sounds-ru-install: uhd-sounds-ru-install sounds-ru-RU-elena-48000-install
+cd-sounds-fr: sounds-fr-ca-june-48000
+cd-sounds-fr-install: sounds-fr-ca-june-48000-install
+cd-moh: uhd-moh sounds-music-48000
+cd-moh-install: uhd-moh-install sounds-music-48000-install
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+# (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+all-recursive: libfreeswitch.la
+clean-recusive: clean_core
+install-recursive: install-libLTLIBRARIES install-binPROGRAMS
+
+CORE_CFLAGS = $(AM_LIBAPR_CFLAGS) $(AM_LIBAPR_CPPFLAGS)
+CORE_CFLAGS += $(AM_LIBAPU_CPPFLAGS)
+CORE_CFLAGS += -I$(switch_srcdir)/libs/srtp/include
+if ENABLE_LIBYUV
+CORE_CFLAGS += -I$(switch_srcdir)/libs/libyuv/include
+CORE_CFLAGS += -DSWITCH_HAVE_YUV
+endif
+CORE_CFLAGS += -I$(switch_srcdir)/libs/srtp/crypto/include -Ilibs/srtp/crypto/include
+CORE_CFLAGS += $(SPANDSP_CFLAGS)
+if ENABLE_LIBVPX
+CORE_CFLAGS += -DSWITCH_HAVE_VPX
+endif
+
+APR_LIBS = $(AM_LIBAPU_LIBS) $(AM_LIBAPR_LIBS)
+CORE_LIBS=
+
+if ENABLE_LIBVPX
+CORE_LIBS += libs/libvpx/libvpx.a
+endif
+if SYSTEM_APRUTIL
+CORE_LIBS += $(AM_LIBAPU_LINKLIBTOOL)
+else
+CORE_LIBS += libs/apr-util/libaprutil-1.la
+endif
+if SYSTEM_APR
+CORE_LIBS += $(AM_LIBAPR_LINKLIBTOOL)
+else
+CORE_LIBS += libs/apr/libapr-1.la
+endif
+
+if ENABLE_SRTP
+CORE_CFLAGS += -DENABLE_SRTP
+CORE_LIBS += libs/srtp/libsrtp.la
+endif
+
+MOD_LINK = $(switch_srcdir)/libfreeswitch.la
+CLEANFILES = src/include/switch_version.h src/include/switch_swigable_cpp.h
+BUILT_SOURCES = src/mod/modules.inc src/include/switch_version.h src/include/switch_swigable_cpp.h
+
+if HAVE_ODBC
+CORE_CFLAGS += -DSWITCH_HAVE_ODBC $(ODBC_INC_FLAGS)
+endif
+
+if HAVE_PNG
+CORE_CFLAGS += -DSWITCH_HAVE_PNG $(LIBPNG_CFLAGS)
+endif
+
+if HAVE_FREETYPE
+CORE_CFLAGS += -DSWITCH_HAVE_FREETYPE $(LIBFREETYPE_CFLAGS)
+endif
+
+if HAVE_GUMBO
+CORE_CFLAGS += -DSWITCH_HAVE_GUMBO $(LIBGUMBO_CFLAGS)
+endif
+
+if HAVE_FVAD
+CORE_CFLAGS += -DSWITCH_HAVE_FVAD $(LIBFVAD_CFLAGS)
+endif
+
+# DH: LWS
+if HAVE_LWS
+CORE_CFLAGS += -DSWITCH_HAVE_LWS $(LWS_CFLAGS)
+LWS_LIBS += -lwebsockets
+endif
+
+# DH: GRPC
+if HAVE_GRPC
+CORE_CFLAGS += -DSWITCH_HAVE_GRPC $(GRPC_CFLAGS)
+GRPC_LIBS += -lgrpc++_reflection -lprotobuf
+endif
+
+##
+## libfreeswitch
+##
+noinst_LTLIBRARIES =
+if ENABLE_LIBYUV
+noinst_LTLIBRARIES += libfreeswitch_libyuv.la
+endif
+
+if ENABLE_LIBYUV
+libfreeswitch_libyuv_la_SOURCES = \
+libs/libyuv/source/compare.cc \
+libs/libyuv/source/compare_common.cc \
+libs/libyuv/source/compare_gcc.cc \
+libs/libyuv/source/compare_mmi.cc \
+libs/libyuv/source/compare_msa.cc \
+libs/libyuv/source/compare_neon64.cc \
+libs/libyuv/source/compare_neon.cc \
+libs/libyuv/source/compare_win.cc \
+libs/libyuv/source/convert_argb.cc \
+libs/libyuv/source/convert.cc \
+libs/libyuv/source/convert_from_argb.cc \
+libs/libyuv/source/convert_from.cc \
+libs/libyuv/source/convert_jpeg.cc \
+libs/libyuv/source/convert_to_argb.cc \
+libs/libyuv/source/convert_to_i420.cc \
+libs/libyuv/source/cpu_id.cc \
+libs/libyuv/source/mjpeg_decoder.cc \
+libs/libyuv/source/mjpeg_validate.cc \
+libs/libyuv/source/planar_functions.cc \
+libs/libyuv/source/rotate_any.cc \
+libs/libyuv/source/rotate_argb.cc \
+libs/libyuv/source/rotate.cc \
+libs/libyuv/source/rotate_common.cc \
+libs/libyuv/source/rotate_gcc.cc \
+libs/libyuv/source/rotate_mmi.cc \
+libs/libyuv/source/rotate_msa.cc \
+libs/libyuv/source/rotate_neon64.cc \
+libs/libyuv/source/rotate_neon.cc \
+libs/libyuv/source/rotate_win.cc \
+libs/libyuv/source/row_any.cc \
+libs/libyuv/source/row_common.cc \
+libs/libyuv/source/row_gcc.cc \
+libs/libyuv/source/row_mmi.cc \
+libs/libyuv/source/row_msa.cc \
+libs/libyuv/source/row_neon64.cc \
+libs/libyuv/source/row_neon.cc \
+libs/libyuv/source/row_win.cc \
+libs/libyuv/source/scale_any.cc \
+libs/libyuv/source/scale_argb.cc \
+libs/libyuv/source/scale.cc \
+libs/libyuv/source/scale_common.cc \
+libs/libyuv/source/scale_gcc.cc \
+libs/libyuv/source/scale_mmi.cc \
+libs/libyuv/source/scale_msa.cc \
+libs/libyuv/source/scale_neon64.cc \
+libs/libyuv/source/scale_neon.cc \
+libs/libyuv/source/scale_win.cc \
+libs/libyuv/source/video_common.cc
+
+
+libfreeswitch_libyuv_la_CPPFLAGS = -O2 -fomit-frame-pointer -Ilibs/libyuv/include
+CORE_LIBS+=libfreeswitch_libyuv.la
+endif
+
+if HAVE_GRPC
+GOOGLEAPIS_GENS_PATH = libs/googleapis/gens
+
+nodist_libfreeswitch_libgoogleapis_la_SOURCES = \
+libs/googleapis/gens/google/api/monitoring.grpc.pb.cc \
+libs/googleapis/gens/google/api/annotations.grpc.pb.cc \
+libs/googleapis/gens/google/api/http.pb.cc \
+libs/googleapis/gens/google/api/quota.pb.cc \
+libs/googleapis/gens/google/api/quota.grpc.pb.cc \
+libs/googleapis/gens/google/api/backend.grpc.pb.cc \
+libs/googleapis/gens/google/api/service.grpc.pb.cc \
+libs/googleapis/gens/google/api/monitored_resource.pb.cc \
+libs/googleapis/gens/google/api/consumer.pb.cc \
+libs/googleapis/gens/google/api/annotations.pb.cc \
+libs/googleapis/gens/google/api/metric.pb.cc \
+libs/googleapis/gens/google/api/logging.pb.cc \
+libs/googleapis/gens/google/api/auth.grpc.pb.cc \
+libs/googleapis/gens/google/api/distribution.grpc.pb.cc \
+libs/googleapis/gens/google/api/label.grpc.pb.cc \
+libs/googleapis/gens/google/api/launch_stage.grpc.pb.cc \
+libs/googleapis/gens/google/api/launch_stage.pb.cc \
+libs/googleapis/gens/google/api/httpbody.grpc.pb.cc \
+libs/googleapis/gens/google/api/config_change.grpc.pb.cc \
+libs/googleapis/gens/google/api/logging.grpc.pb.cc \
+libs/googleapis/gens/google/api/context.pb.cc \
+libs/googleapis/gens/google/api/system_parameter.pb.cc \
+libs/googleapis/gens/google/api/distribution.pb.cc \
+libs/googleapis/gens/google/api/control.pb.cc \
+libs/googleapis/gens/google/api/consumer.grpc.pb.cc \
+libs/googleapis/gens/google/api/label.pb.cc \
+libs/googleapis/gens/google/api/documentation.pb.cc \
+libs/googleapis/gens/google/api/log.pb.cc \
+libs/googleapis/gens/google/api/usage.grpc.pb.cc \
+libs/googleapis/gens/google/api/backend.pb.cc \
+libs/googleapis/gens/google/api/control.grpc.pb.cc \
+libs/googleapis/gens/google/api/log.grpc.pb.cc \
+libs/googleapis/gens/google/api/source_info.grpc.pb.cc \
+libs/googleapis/gens/google/api/billing.pb.cc \
+libs/googleapis/gens/google/api/auth.pb.cc \
+libs/googleapis/gens/google/api/resource.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/service_controller.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/check_error.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/check_error.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/distribution.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/quota_controller.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/metric_value.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/distribution.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/http_request.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/log_entry.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/service_controller.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/metric_value.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/log_entry.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/operation.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/quota_controller.pb.cc \
+libs/googleapis/gens/google/api/servicecontrol/v1/operation.pb.cc \
+libs/googleapis/gens/google/api/metric.grpc.pb.cc \
+libs/googleapis/gens/google/api/monitored_resource.grpc.pb.cc \
+libs/googleapis/gens/google/api/http.grpc.pb.cc \
+libs/googleapis/gens/google/api/httpbody.pb.cc \
+libs/googleapis/gens/google/api/endpoint.pb.cc \
+libs/googleapis/gens/google/api/documentation.grpc.pb.cc \
+libs/googleapis/gens/google/api/system_parameter.grpc.pb.cc \
+libs/googleapis/gens/google/api/endpoint.grpc.pb.cc \
+libs/googleapis/gens/google/api/service.pb.cc \
+libs/googleapis/gens/google/api/source_info.pb.cc \
+libs/googleapis/gens/google/api/servicemanagement/v1/resources.grpc.pb.cc \
+libs/googleapis/gens/google/api/servicemanagement/v1/servicemanager.pb.cc \
+libs/googleapis/gens/google/api/servicemanagement/v1/resources.pb.cc \
+libs/googleapis/gens/google/api/servicemanagement/v1/servicemanager.grpc.pb.cc \
+libs/googleapis/gens/google/api/billing.grpc.pb.cc \
+libs/googleapis/gens/google/api/usage.pb.cc \
+libs/googleapis/gens/google/api/config_change.pb.cc \
+libs/googleapis/gens/google/api/context.grpc.pb.cc \
+libs/googleapis/gens/google/api/monitoring.pb.cc \
+libs/googleapis/gens/google/api/field_behavior.pb.cc \
+libs/googleapis/gens/google/api/client.pb.cc \
+libs/googleapis/gens/google/rpc/error_details.grpc.pb.cc \
+libs/googleapis/gens/google/rpc/code.pb.cc \
+libs/googleapis/gens/google/rpc/status.pb.cc \
+libs/googleapis/gens/google/rpc/status.grpc.pb.cc \
+libs/googleapis/gens/google/rpc/error_details.pb.cc \
+libs/googleapis/gens/google/rpc/code.grpc.pb.cc \
+libs/googleapis/gens/google/longrunning/operations.grpc.pb.cc \
+libs/googleapis/gens/google/longrunning/operations.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1/cloud_speech.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1/cloud_speech.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1p1beta1/cloud_speech.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1p1beta1/cloud_speech.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1p1beta1/resource.pb.cc \
+libs/googleapis/gens/google/cloud/speech/v1p1beta1/resource.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/texttospeech/v1/cloud_tts.pb.cc \
+libs/googleapis/gens/google/cloud/texttospeech/v1/cloud_tts.grpc.pb.cc \
+libs/googleapis/gens/google/logging/type/http_request.grpc.pb.cc \
+libs/googleapis/gens/google/logging/type/log_severity.grpc.pb.cc \
+libs/googleapis/gens/google/logging/type/log_severity.pb.cc \
+libs/googleapis/gens/google/logging/type/http_request.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging_metrics.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging.grpc.pb.cc \
+libs/googleapis/gens/google/logging/v2/log_entry.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging_config.grpc.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging_config.pb.cc \
+libs/googleapis/gens/google/logging/v2/log_entry.grpc.pb.cc \
+libs/googleapis/gens/google/logging/v2/logging_metrics.grpc.pb.cc \
+libs/googleapis/gens/google/type/date.grpc.pb.cc \
+libs/googleapis/gens/google/type/timeofday.pb.cc \
+libs/googleapis/gens/google/type/latlng.grpc.pb.cc \
+libs/googleapis/gens/google/type/money.pb.cc \
+libs/googleapis/gens/google/type/date.pb.cc \
+libs/googleapis/gens/google/type/postal_address.grpc.pb.cc \
+libs/googleapis/gens/google/type/dayofweek.grpc.pb.cc \
+libs/googleapis/gens/google/type/dayofweek.pb.cc \
+libs/googleapis/gens/google/type/timeofday.grpc.pb.cc \
+libs/googleapis/gens/google/type/color.pb.cc \
+libs/googleapis/gens/google/type/postal_address.pb.cc \
+libs/googleapis/gens/google/type/latlng.pb.cc \
+libs/googleapis/gens/google/type/color.grpc.pb.cc \
+libs/googleapis/gens/google/type/money.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/gcs.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/environment.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/fulfillment.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/validation_result.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/agent.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/agent.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/audio_config.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/audio_config.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/context.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/context.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/document.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/document.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/entity_type.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/entity_type.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/intent.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/intent.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/knowledge_base.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/knowledge_base.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session_entity_type.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session_entity_type.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/webhook.grpc.pb.cc \
+libs/googleapis/gens/google/cloud/dialogflow/v2beta1/webhook.pb.cc
+
+libfreeswitch_libgoogleapis_la_CPPFLAGS = -I/usr/local/include -I$(GOOGLEAPIS_GENS_PATH) -std=c++17 -pthread
+
+# nuance asr
+NUANCE_GENS_PATH = libs/nuance-asr-grpc-api/stubs/
+
+nodist_libfreeswitch_libnuanceapis_la_SOURCES = \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/error_details.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/status_code.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/status.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/error_details.grpc.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/status_code.grpc.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/rpc/status.grpc.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/result.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/resource.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/recognizer.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/result.grpc.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/resource.grpc.pb.cc \
+libs/nuance-asr-grpc-api/stubs/nuance/asr/v1/recognizer.grpc.pb.cc
+
+libfreeswitch_libnuanceapis_la_CPPFLAGS = -I/usr/local/include -I$(NUANCE_GENS_PATH) -std=c++17 -pthread
+
+#nvidia asr
+NVIDIA_GENS_PATH = libs/riva-asr-grpc-api/stubs/
+
+nodist_libfreeswitch_libnvidiaapis_la_SOURCES = \
+libs/riva-asr-grpc-api/stubs/riva/proto/riva_asr.pb.cc \
+libs/riva-asr-grpc-api/stubs/riva/proto/riva_audio.pb.cc \
+libs/riva-asr-grpc-api/stubs/riva/proto/riva_asr.grpc.pb.cc \
+libs/riva-asr-grpc-api/stubs/riva/proto/riva_audio.grpc.pb.cc
+
+libfreeswitch_libnvidiaapis_la_CPPFLAGS = -I/usr/local/include -I$(NVIDIA_GENS_PATH) -std=c++17 -pthread
+
+#soniox asr
+SONIOX_GENS_PATH = libs/soniox-asr-grpc-api/stubs/
+
+nodist_libfreeswitch_libsonioxapis_la_SOURCES = \
+libs/soniox-asr-grpc-api/stubs/soniox/speech_service.pb.cc \
+libs/soniox-asr-grpc-api/stubs/soniox/speech_service.grpc.pb.cc
+
+libfreeswitch_libsonioxapis_la_CPPFLAGS = -I/usr/local/include -I$(SONIOX_GENS_PATH) -std=c++17 -pthread
+
+CORE_LIBS+=libfreeswitch_libgoogleapis.la libfreeswitch_libnuanceapis.la libfreeswitch_libnvidiaapis.la libfreeswitch_libsonioxapis.la
+noinst_LTLIBRARIES += libfreeswitch_libgoogleapis.la libfreeswitch_libnuanceapis.la libfreeswitch_libnvidiaapis.la libfreeswitch_libsonioxapis.la
+
+endif
+
+lib_LTLIBRARIES = libfreeswitch.la
+libfreeswitch_la_CFLAGS = $(CORE_CFLAGS) $(SQLITE_CFLAGS) $(GUMBO_CFLAGS) $(FVAD_CFLAGS) $(FREETYPE_CFLAGS) $(CURL_CFLAGS) $(PCRE_CFLAGS) $(SPEEX_CFLAGS) $(LIBEDIT_CFLAGS) $(openssl_CFLAGS) $(SOFIA_SIP_CFLAGS) $(AM_CFLAGS) $(TPL_CFLAGS)
+libfreeswitch_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS) $(PLATFORM_CORE_LDFLAGS) -no-undefined
+libfreeswitch_la_LIBADD = $(CORE_LIBS) $(APR_LIBS) $(LWS_LIBS) $(SQLITE_LIBS) $(GUMBO_LIBS) $(FVAD_LIBS) $(FREETYPE_LIBS) $(CURL_LIBS) $(PCRE_LIBS) $(SPEEX_LIBS) $(LIBEDIT_LIBS) $(openssl_LIBS) $(GRPC_LIBS) $(PLATFORM_CORE_LIBS) $(TPL_LIBS) $(SPANDSP_LIBS) $(SOFIA_SIP_LIBS)
+libfreeswitch_la_DEPENDENCIES = $(BUILT_SOURCES)
+
+if HAVE_PNG
+libfreeswitch_la_LIBADD += $(LIBPNG_LIBS)
+endif
+
+if HAVE_ODBC
+libfreeswitch_la_LDFLAGS += $(ODBC_LIB_FLAGS)
+endif
+
+if ENABLE_ZRTP
+CORE_CFLAGS += -I$(switch_srcdir)/libs/libzrtp/third_party/bgaes
+CORE_CFLAGS += -I$(switch_srcdir)/libs/libzrtp/third_party/bnlib
+CORE_CFLAGS += -isystem $(switch_srcdir)/libs/libzrtp/include
+ZRTP_LDFLAGS = -L$(switch_srcdir)/libs/libzrtp/third_party/bnlib
+ZRTP_LDFLAGS += -L$(switch_srcdir)/libs/libzrtp
+ZRTP_LIBS = -lbn -lzrtp
+libfreeswitch_la_LDFLAGS += $(ZRTP_LDFLAGS)
+libfreeswitch_la_LIBADD += $(ZRTP_LIBS)
+CORE_LIBS += libs/libzrtp/libzrtp.a
+LIBS += libs/libzrtp/third_party/bnlib/libbn.a
+endif
+
+library_includetestdir = $(includedir)/test
+library_includetest_HEADERS = \
+ src/include/test/switch_fct.h \
+ src/include/test/switch_test.h
+
+library_includedir = $(includedir)
+library_include_HEADERS = \
+ src/include/switch_am_config.h \
+ src/include/switch.h \
+ src/include/switch_apr.h \
+ src/include/switch_buffer.h \
+ src/include/switch_caller.h \
+ src/include/switch_channel.h \
+ src/include/switch_console.h \
+ src/include/switch_core_event_hook.h \
+ src/include/switch_scheduler.h \
+ src/include/switch_core.h \
+ src/include/switch_core_media.h \
+ src/include/switch_core_video.h \
+ src/include/switch_core_db.h \
+ src/include/switch_mprintf.h \
+ src/include/switch_config.h \
+ src/include/switch_event.h \
+ src/include/switch_frame.h \
+ src/include/switch_ivr.h \
+ src/include/switch_dso.h \
+ src/include/switch_loadable_module.h \
+ src/include/switch_module_interfaces.h \
+ src/include/switch_platform.h \
+ src/include/switch_resample.h \
+ src/include/switch_regex.h \
+ src/include/switch_types.h \
+ src/include/switch_utils.h \
+ src/include/switch_rtp.h \
+ src/include/switch_jitterbuffer.h \
+ src/include/switch_estimators.h \
+ src/include/switch_rtcp_frame.h \
+ src/include/switch_stun.h \
+ src/include/switch_nat.h \
+ src/include/switch_log.h \
+ src/include/switch_xml.h \
+ src/include/switch_xml_config.h \
+ src/include/switch_cpp.h \
+ src/include/switch_curl.h \
+ src/include/switch_cJSON.h \
+ src/include/switch_cJSON_Utils.h \
+ src/include/switch_json.h \
+ src/include/switch_utf8.h \
+ src/include/switch_msrp.h \
+ src/include/switch_vpx.h \
+ src/include/switch_vad.h \
+ libs/libteletone/src/libteletone_detect.h \
+ libs/libteletone/src/libteletone_generate.h \
+ libs/libteletone/src/libteletone.h \
+ src/include/switch_limit.h \
+ src/include/switch_odbc.h \
+ src/include/switch_hashtable.h \
+ src/include/switch_image.h
+
+nodist_libfreeswitch_la_SOURCES = \
+ src/include/switch_frame.h \
+ src/include/switch_swigable_cpp.h \
+ src/include/switch_version.h
+
+libfreeswitch_la_SOURCES = \
+ src/switch_apr.c \
+ src/switch_buffer.c \
+ src/switch_caller.c \
+ src/switch_channel.c \
+ src/switch_console.c \
+ src/switch_mprintf.c \
+ src/switch_core_media_bug.c \
+ src/switch_core_timer.c \
+ src/switch_core_asr.c \
+ src/switch_core_event_hook.c \
+ src/switch_core_speech.c \
+ src/switch_core_memory.c \
+ src/switch_core_codec.c \
+ src/switch_core_file.c \
+ src/switch_core_cert.c \
+ src/switch_core_hash.c \
+ src/switch_core_sqldb.c \
+ src/switch_core_session.c \
+ src/switch_core_directory.c \
+ src/switch_core_state_machine.c \
+ src/switch_core_io.c \
+ src/switch_core_rwlock.c \
+ src/switch_core_port_allocator.c \
+ src/switch_core.c \
+ src/switch_version.c \
+ src/switch_core_media.c \
+ src/switch_core_video.c \
+ src/switch_sdp.c \
+ src/switch_scheduler.c \
+ src/switch_core_db.c \
+ src/switch_dso.c \
+ src/switch_loadable_module.c \
+ src/switch_utils.c \
+ src/switch_event.c \
+ src/switch_resample.c \
+ src/switch_regex.c \
+ src/switch_rtp.c \
+ src/switch_jitterbuffer.c \
+ src/switch_estimators.c \
+ src/switch_ivr_bridge.c \
+ src/switch_ivr_originate.c \
+ src/switch_ivr_async.c \
+ src/switch_ivr_play_say.c \
+ src/switch_ivr_say.c \
+ src/switch_ivr_menu.c \
+ src/switch_ivr.c \
+ src/switch_stun.c \
+ src/switch_nat.c \
+ src/switch_log.c \
+ src/switch_xml.c \
+ src/switch_xml_config.c \
+ src/switch_config.c \
+ src/switch_time.c \
+ src/switch_odbc.c \
+ src/switch_limit.c \
+ src/g711.c \
+ src/switch_pcm.c \
+ src/switch_speex.c \
+ src/switch_profile.c \
+ src/cJSON.c \
+ src/cJSON_Utils.c \
+ src/switch_json.c \
+ src/switch_curl.c \
+ src/switch_hashtable.c\
+ src/switch_utf8.c \
+ src/switch_msrp.c \
+ src/switch_vad.c \
+ src/switch_vpx.c \
+ libs/libteletone/src/libteletone_detect.c \
+ libs/libteletone/src/libteletone_generate.c \
+ libs/miniupnpc/miniwget.c \
+ libs/miniupnpc/minixml.c \
+ libs/miniupnpc/igd_desc_parse.c \
+ libs/miniupnpc/minisoap.c \
+ libs/miniupnpc/miniupnpc.c \
+ libs/miniupnpc/upnpreplyparse.c \
+ libs/miniupnpc/upnpcommands.c \
+ libs/miniupnpc/minissdpc.c \
+ libs/miniupnpc/upnperrors.c \
+ libs/libnatpmp/natpmp.c \
+ libs/libnatpmp/getgateway.c
+
+if ENABLE_CPP
+libfreeswitch_la_SOURCES += src/switch_cpp.cpp
+endif
+
+$(libfreeswitch_la_SOURCES): $(CORE_LIBS) $(switch_builddir)/modules.conf
+
+src/include/switch_swigable_cpp.h: $(switch_srcdir)/src/include/switch_cpp.h
+ cat $(switch_srcdir)/src/include/switch_cpp.h | perl $(switch_srcdir)/build/strip.pl > $(switch_srcdir)/src/include/switch_swigable_cpp.h
+# $(CC) -E $(switch_srcdir)/src/include/switch_cpp.h \
+# -I$(switch_srcdir)/src/include -I$(switch_srcdir)/libs/libteletone/src \
+# -DSWITCH_DECLARE_CLASS= -DSWITCH_DECLARE\(x\)=x -DSWITCH_DECLARE_CONSTRUCTOR= \
+# -DSWITCH_DECLARE_NONSTD\(x\)=x 2>/dev/null | grep -v "^#" > src/include/switch_swigable_cpp.h
+
+##
+## Applications
+##
+bin_PROGRAMS = freeswitch fs_cli fs_ivrd tone2wav fs_encode fs_tts
+
+##
+## fs_cli ()
+##
+fs_cli_SOURCES = libs/esl/src/esl.c libs/esl/src/esl_config.c libs/esl/src/esl_event.c \
+ libs/esl/src/esl_threadmutex.c libs/esl/fs_cli.c libs/esl/src/esl_json.c libs/esl/src/esl_buffer.c libs/esl/src/cJSON.c libs/esl/src/cJSON_Utils.c
+fs_cli_CFLAGS = $(AM_CFLAGS) -I$(switch_srcdir)/libs/esl/src/include $(LIBEDIT_CFLAGS)
+fs_cli_LDFLAGS = $(AM_LDFLAGS) -lpthread $(ESL_LDFLAGS) -lm $(LIBEDIT_LIBS)
+
+if DISABLE_CC
+fs_cli_CFLAGS += -DDISABLE_CC
+endif
+
+##
+## fs_encode ()
+##
+fs_encode_SOURCES = src/fs_encode.c
+fs_encode_CFLAGS = $(AM_CFLAGS)
+fs_encode_LDFLAGS = $(AM_LDFLAGS)
+fs_encode_LDADD = libfreeswitch.la $(CORE_LIBS) $(APR_LIBS)
+
+if HAVE_ODBC
+fs_encode_LDADD += $(ODBC_LIB_FLAGS)
+endif
+
+##
+## fs_tts ()
+##
+fs_tts_SOURCES = src/fs_tts.c
+fs_tts_CFLAGS = $(AM_CFLAGS)
+fs_tts_LDFLAGS = $(AM_LDFLAGS)
+fs_tts_LDADD = libfreeswitch.la $(CORE_LIBS) $(APR_LIBS)
+
+##
+## tone2wav ()
+##
+tone2wav_SOURCES = src/tone2wav.c
+tone2wav_CFLAGS = $(AM_CFLAGS)
+tone2wav_LDFLAGS = $(AM_LDFLAGS)
+tone2wav_LDADD = libfreeswitch.la $(CORE_LIBS) $(APR_LIBS)
+
+if HAVE_ODBC
+tone2wav_LDADD += $(ODBC_LIB_FLAGS)
+endif
+
+
+##
+## fs_ivrd ()
+##
+fs_ivrd_SOURCES = libs/esl/src/esl.c libs/esl/src/esl_config.c libs/esl/src/esl_event.c \
+ libs/esl/src/esl_threadmutex.c libs/esl/ivrd.c libs/esl/src/esl_json.c libs/esl/src/esl_buffer.c libs/esl/src/cJSON.c libs/esl/src/cJSON_Utils.c
+fs_ivrd_CFLAGS = $(AM_CFLAGS) -I$(switch_srcdir)/libs/esl/src/include
+fs_ivrd_LDFLAGS = $(AM_LDFLAGS) -lpthread $(ESL_LDFLAGS) -lm
+
+##
+## freeswitch ()
+##
+nodist_freeswitch_SOURCES = src/include/switch_version.h
+freeswitch_SOURCES = src/switch.c
+freeswitch_CFLAGS = $(AM_CFLAGS) $(CORE_CFLAGS)
+freeswitch_LDFLAGS = $(AM_LDFLAGS) -lpthread -rpath $(libdir)
+freeswitch_LDADD = libfreeswitch.la libs/apr/libapr-1.la
+
+if HAVE_ODBC
+freeswitch_LDADD += $(ODBC_LIB_FLAGS)
+endif
+
+
+##
+## Scripts
+##
+bin_SCRIPTS = scripts/gentls_cert scripts/fsxs
+
+scripts/fsxs: scripts/fsxs.in
+ @echo creating fsxs
+ @sed -e "s,@MODULES_DIR\@,@modulesdir@," \
+ -e "s,@LIB_DIR\@,@libdir@," \
+ -e "s,@BIN_DIR\@,@bindir@," \
+ -e "s,@INC_DIR\@,@includedir@," \
+ -e "s,@CFG_DIR\@,@confdir@," \
+ -e "s,@DB_DIR\@,@dbdir@," \
+ -e "s,@PREFIX\@,@prefix@," \
+ -e "s,@CC\@,$(CC)," \
+ -e "s,@LD\@,$(CC)," \
+ -e "s,@INSTALL\@,$(INSTALL)," \
+ -e "s,@MKINSTALLDIRS\@,$(mkdir_p)," \
+ \
+ -e "s|@CFLAGS\@|$(CFLAGS) `./libs/apr/apr-1-config --cflags --cppflags`|" \
+ -e "s|@INCLUDES\@|-I$(prefix)/include|" \
+ -e "s|@SOLINK\@|$(SOLINK)|" \
+ -e "s|@LDFLAGS\@|-L$(prefix)/lib|" \
+ -e "s|@LIBS\@||" \
+ $(top_srcdir)/scripts/fsxs.in > scripts/fsxs
+
+##
+## misc
+##
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = build/freeswitch.pc
+
+$(switch_builddir)/modules.conf:
+ if test -f $@; then touch $@; else cp $(switch_srcdir)/build/modules.conf.in $@ ;fi
+
+src/mod/modules.inc: $(switch_builddir)/modules.conf
+ @echo "OUR_MODULES=$(OUR_MODS)" > $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_CLEAN_MODULES=$(OUR_CLEAN_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_TEST_MODULES=$(OUR_TEST_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_CHECK_MODULES=$(OUR_CHECK_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_INSTALL_MODULES=$(OUR_INSTALL_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_UNINSTALL_MODULES=$(OUR_UNINSTALL_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_DISABLED_MODULES=$(OUR_DISABLED_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_DISABLED_CLEAN_MODULES=$(OUR_DISABLED_CLEAN_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_DISABLED_INSTALL_MODULES=$(OUR_DISABLED_INSTALL_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+ @echo "OUR_DISABLED_UNINSTALL_MODULES=$(OUR_DISABLED_UNINSTALL_MODS)" >> $(switch_builddir)/src/mod/modules.inc
+
+$(OUR_MODULES): $(switch_builddir)/modules.conf libfreeswitch.la src/mod/modules.inc
+ @set fnord $$MAKEFLAGS; amf=$$2; \
+ (cd src/mod && $(MAKE) $(AM_MAKEFLAGS) $@) \
+ || case "$$amf" in *=*) exit 1;; *k*) fail=yes;; *) exit 1;; esac; \
+ test -z "$$fail"
+
+$(switch_builddir)/build/print_git_revision: $(switch_srcdir)/build/print_git_revision.c
+ $(CC_FOR_BUILD) -o $@ $<
+
+src/switch_version.lo: src/include/switch_version.h
+
+src/include/switch_version.h: src/include/switch_version.h.in Makefile $(switch_builddir)/build/print_git_revision $(libfreeswitch_la_SOURCES) $(library_include_HEADERS)
+ @cat $< > $@; \
+ if [ -d $(switch_srcdir)/.git ] && [ -n "$$(which git)" ]; then \
+ xver="$$(cd $(switch_srcdir)/ && $(switch_builddir)/build/print_git_revision)"; \
+ xhver="$$(cd $(switch_srcdir)/ && $(switch_builddir)/build/print_git_revision -h)"; \
+ sed \
+ -e "/#define *SWITCH_VERSION_REVISION[^a-zA-Z0-9_]/{s/\"\([^\"]*\)\"/\"\1$$xver\"/;}" \
+ -e "/#define *SWITCH_VERSION_REVISION_HUMAN[^a-zA-Z0-9_]/{s/\"\([^\"]*\)\"/\"\1$$xhver\"/;}" \
+ $< > $@; \
+ fi;
+
+##
+## Dependency targets
+##
+libs/libedit/src/.libs/libedit.a:
+ cd libs/libedit && $(MAKE)
+
+libs/libzrtp/libzrtp.a:
+ cd libs/libzrtp && $(MAKE)
+
+libs/libvpx/Makefile: libs/libvpx/.update
+ cd libs/libvpx && CC="$(CC)" CXX="$(CXX)" CFLAGS="$(CFLAGS) $(VISIBILITY_FLAG)" CXXFLAGS="$(CXXFLAGS)" LDFLAGS="$(LDFLAGS)" ./configure --enable-pic --disable-docs --disable-examples --disable-install-bins --disable-install-srcs --disable-unit-tests --size-limit=16384x16384
+
+libs/libvpx/libvpx.a: libs/libvpx/Makefile libs/libvpx/.update
+ @cd libs/libvpx && $(MAKE)
+
+libs/apr/Makefile: libs/apr/Makefile.in libs/apr/config.status libs/apr libs/apr/.update
+ @cd libs/apr && ./config.status
+ @$(TOUCH_TARGET)
+
+libs/apr/libapr-1.la: libs/apr/Makefile libs/apr/.update
+ @if [ $(MAKELEVEL) = 0 -o -z "`echo "$(MAKEARGS)" | grep "j"`" ] ; then touch $(switch_srcdir)/src/include/switch.h; cd libs/apr && $(MAKE) $(MFLAGS) && touch libapr-1.la; fi
+
+libs/apr-util/libaprutil-1.la: libs/apr/libapr-1.la libs/apr-util libs/apr-util/.update
+ @if [ $(MAKELEVEL) = 0 -o -z "`echo "$(MAKEARGS)" | grep "j"`" ] ; then touch $(switch_srcdir)/src/include/switch.h; cd libs/apr-util && $(MAKE) $(MFLAGS) && touch libaprutil-1.la; fi
+
+SRTP_SRC = libs/srtp/srtp/srtp.c libs/srtp/srtp/ekt.c libs/srtp/crypto/cipher/cipher.c libs/srtp/crypto/cipher/null_cipher.c \
+ libs/srtp/crypto/cipher/aes.c libs/srtp/crypto/cipher/aes_icm.c \
+ libs/srtp/crypto/hash/null_auth.c libs/srtp/crypto/hash/sha1.c \
+ libs/srtp/crypto/hash/hmac.c libs/srtp/crypto/hash/auth.c \
+ libs/srtp/crypto/math/datatypes.c libs/srtp/crypto/math/stat.c \
+ libs/srtp/crypto/kernel/crypto_kernel.c libs/srtp/crypto/kernel/alloc.c \
+ libs/srtp/crypto/kernel/key.c libs/srtp/crypto/kernel/err.c \
+ libs/srtp/crypto/replay/rdb.c libs/srtp/crypto/replay/rdbx.c libs/srtp/crypto/replay/ut_sim.c
+
+libs/srtp/libsrtp.la: libs/srtp libs/srtp/.update $(SRTP_SRC)
+ touch $(switch_srcdir)/src/include/switch.h
+ @cd libs/srtp && $(MAKE)
+ @$(TOUCH_TARGET)
+
+##
+## helper targets
+##
+yaml-files:
+ @echo `mkdir $(DESTDIR)$(confdir)/yaml 2>/dev/null`
+ $(INSTALL) -m 644 $(switch_srcdir)/conf/default/yaml/*.yaml $(DESTDIR)$(confdir)/yaml
+
+vm-sync:
+ test -d $(DESTDIR)$(confdir) || $(mkinstalldirs) $(DESTDIR)$(confdir)
+ test -d $(DESTDIR)$(confdir)/lang || $(mkinstalldirs) $(DESTDIR)$(confdir)/lang
+ test -d $(DESTDIR)$(confdir)/lang/en || $(mkinstalldirs) $(DESTDIR)$(confdir)/lang/en
+ test -d $(DESTDIR)$(confdir)/lang/en/demo || $(mkinstalldirs) $(DESTDIR)$(confdir)/lang/en/demo
+ test -d $(DESTDIR)$(confdir)/lang/en/vm || $(mkinstalldirs) $(DESTDIR)$(confdir)/lang/en/vm
+ $(INSTALL) -m 644 $(switch_srcdir)/conf/vanilla/lang/en/vm/* $(DESTDIR)$(confdir)/lang/en/vm
+ $(INSTALL) -m 644 $(switch_srcdir)/conf/vanilla/lang/en/demo/* $(DESTDIR)$(confdir)/lang/en/demo
+
+config-%:
+ test -d $(DESTDIR)$(confdir) || $(mkinstalldirs) $(DESTDIR)$(confdir)
+ for conffile in `cd $(switch_srcdir)/conf/$* && find . -name \*.xml && find . -name \*.conf && find . -name \*.tpl && find . -name \*.ttml && find . -name mime.types` ; do \
+ dir=`echo $$conffile | sed -e 's|^\.||' | sed -e 's|/[^/]*$$||'`; \
+ filename=`echo $$conffile | sed -e 's|^\.||' | sed -e 's|^.*/||'`; \
+ test -d $(DESTDIR)$(confdir)$$dir || $(mkinstalldirs) $(DESTDIR)$(confdir)$$dir ; \
+ test -f $(DESTDIR)$(confdir)$$dir/$$filename || \
+ test -f $(DESTDIR)$(confdir)$$dir/$$filename.noload || \
+ $(INSTALL) -m 644 $(switch_srcdir)/conf/$*/$$dir/$$filename $(DESTDIR)$(confdir)$$dir; \
+ done
+
+samples-conf: config-vanilla
+
+samples-htdocs:
+ test -d $(DESTDIR)$(htdocsdir) || $(mkinstalldirs) $(DESTDIR)$(htdocsdir)
+ for htdocsfile in `cd $(switch_srcdir)/htdocs && find . -type f -name \* | sed -e 's|^\.||'` ; do \
+ dir=`echo $$htdocsfile | sed -e 's|/[^/]*$$||'`; \
+ filename=`echo $$htdocsfile | sed -e 's|^.*/||'`; \
+ test -d $(DESTDIR)$(htdocsdir)$$dir || $(mkinstalldirs) $(DESTDIR)$(htdocsdir)$$dir ; \
+ test -f $(DESTDIR)$(htdocsdir)$$dir/$$filename || $(INSTALL) -m 644 $(switch_srcdir)/htdocs/$$dir/$$filename $(DESTDIR)$(htdocsdir)$$dir 2>/dev/null; \
+ done
+
+
+fonts_DATA = fonts/FreeMono.ttf fonts/FreeMonoOblique.ttf fonts/FreeSansBoldOblique.ttf fonts/FreeSerifBold.ttf fonts/OFL.txt fonts/FreeMonoBold.ttf fonts/FreeSans.ttf fonts/FreeSansOblique.ttf fonts/FreeSerifBoldItalic.ttf fonts/README.fonts fonts/FreeMonoBoldOblique.ttf fonts/FreeSansBold.ttf fonts/FreeSerif.ttf fonts/FreeSerifItalic.ttf
+
+images_DATA = images/default-avatar.png images/default-mute.png
+
+samples: samples-conf samples-htdocs
+
+install-exec-local:
+ $(mkinstalldirs) $(DESTDIR)$(modulesdir)
+
+install-data-local:
+ @echo Installing $(NAME)
+ @for x in $(modulesdir) $(runtimedir) $(dbdir) $(logfiledir) $(logfiledir)/xml_cdr $(bindir) $(scriptdir) $(recordingsdir) $(grammardir) $(imagesdir) $(fontsdir); do \
+ $(mkinstalldirs) $(DESTDIR)$$x ; \
+ done
+ test -d $(DESTDIR)$(confdir) || $(MAKE) samples-conf
+ test -d $(DESTDIR)$(htdocsdir) || $(MAKE) samples-htdocs
+
+is-scm:
+ @if [ ! -d .git ] ; then \
+ echo ; echo ; \
+ echo "*****************************************************************************************************" ; \
+ echo "You cannot update a release tarball without a git tree. Please clone FreeSWITCH as so: " ; \
+ echo " git clone https://github.com/signalwire/freeswitch.git " ; \
+ echo "*****************************************************************************************************" ; \
+ echo ; echo ; \
+ exit 1; \
+ fi
+
+update: is-scm
+ @if test -d .git ; then \
+ echo "Pulling updates..." ; \
+ git pull ; \
+ else \
+ echo "This source directory is not a git tree." ; \
+ fi
+
+.nodepends:
+ touch .nodepends
+
+nodepends: .nodepends
+
+yesdepends:
+ rm .nodepends
+
+iksemel-dep:
+ make -C src/mod/endpoints/mod_dingaling deps
+
+core: $(switch_builddir)/modules.conf src/include/switch_version.h $(CORE_LIBS)
+ $(MAKE) $(AM_MAKEFLAGS) libfreeswitch.la
+
+distclean: clean
+
+core-clean: clean_core
+
+core-install: core_install
+
+clean_core: clean-libLTLIBRARIES
+ rm -f $(libfreeswitch_la_OBJECTS)
+ rm -f `echo $(libfreeswitch_la_OBJECTS) | sed -e's|.lo|.o|g'`
+
+install_core: install-libLTLIBRARIES
+
+core_install: install_core
+
+everything: install
+
+up: is-scm clean
+ $(MAKE) update
+ $(MAKE) -j core
+ $(MAKE) -j modules
+ $(MAKE) install
+
+sync: is-scm
+ $(MAKE) update
+ $(MAKE) install
+
+speedy-sync: is-scm
+ $(MAKE) update
+ $(MAKE) -j install
+
+version:
+ git log -1 | head -3
+
+reinstall: modwipe uninstall install
+
+pristine:
+ git clean -fdx
+ git reset --hard
+
+update-clean: clean python-reconf
+ cd libs/esl && $(MAKE) clean
+ cd libs/srtp && $(MAKE) clean
+
+swigall:
+ @echo reswigging all
+ sh $(switch_srcdir)/build/swigall.sh
+
+sndfile-reconf:
+ cd libs/libsndfile && autoreconf
+ cd libs/libsndfile && ./config.status --recheck
+ cd libs/libsndfile && ./config.status
+
+python-reconf:
+ rm -f src/mod/languages/mod_python/Makefile
+ ./config.status
+
+reconf:
+ rm config.cache
+ sh ./config.status --recheck
+ sh ./config.status
+
+srtp-reconf:
+ cd libs/srtp && $(MAKE) clean
+ cd libs/srtp && sh ./config.status --recheck
+ cd libs/srtp && sh ./config.status
+
+
+iks-reconf:
+ cd libs/iksemel && $(MAKE) clean
+ cd libs/iksemel && autoreconf -fi
+ cd libs/iksemel && sh ./configure.gnu $(MY_DEFAULT_ARGS)
+ $(MAKE) mod_dingaling-clean
+
+cluecon:
+ @clear
+ @echo Thank you for updating. This is going to take a while so relax.
+ @echo Now would be a good time to register for ClueCon!
+ @cat $(switch_srcdir)/cluecon2.tmpl
+ @echo
+ @echo http://www.cluecon.com
+ @sleep 5
+
+sure: is-scm pristine update
+ git pull
+ sh bootstrap.sh
+ sh configure $(CONFIGURE_ARGS)
+ make $(MAKE_ARGS)
+ make reinstall
+
+current: cluecon update-clean is-scm
+ $(MAKE) update
+ $(MAKE) all
+ $(MAKE) reinstall
+
+installall: current
+
+speedy-current: update-clean is-scm
+ $(MAKE) update
+ $(MAKE) speedy-sure
+ $(MAKE) reinstall
+
+wayclean: clean
+
+modules: libfreeswitch.la $(switch_builddir)/modules.conf src/mod/modules.inc
+ @cd src/mod && $(MAKE) $(AM_MAKEFLAGS)
+
+install_mod: libfreeswitch.la $(switch_builddir)/modules.conf src/mod/modules.inc
+ @cd src/mod && $(MAKE) $(AM_MAKEFLAGS) install
+
+mod_install: install_mod
+
+uninstall_mod: $(switch_builddir)/modules.conf src/mod/modules.inc
+ @cd src/mod && $(MAKE) $(AM_MAKEFLAGS) uninstall
+
+mod_uninstall: uninstall_mod
+
+modclean: $(switch_builddir)/modules.conf src/mod/modules.inc
+ @cd src/mod && $(MAKE) $(AM_MAKEFLAGS) clean
+
+modwipe:
+ rm -f $(modulesdir)/*.so $(modulesdir)/*.la $(modulesdir)/*.dll $(modulesdir)/*.dylib
+
+print_tests: libfreeswitch.la $(switch_builddir)/modules.conf src/mod/modules.inc
+ @cd tests/unit && $(MAKE) $(AM_MAKEFLAGS) print_tests
+ @cd src/mod && $(MAKE) $(AM_MAKEFLAGS) print_tests
+
+dox:
+ cd docs && doxygen $(PWD)/docs/Doxygen.conf
+
+eclean: clean
+ rm -f `find . -type f -name \*~`
+ rm -f `find . -type f -name \.*~`
+ rm -f `find . -type f -name \#\*`
+ rm -f `find . -type f -name \.\#\*`
+ rm -f `find . -type f -name core\*`
+ rm -f *.tar *.tgz *.gz
+
+megaclean: eclean modclean
+ rm -f `find ./libs -name \*.o`
+ rm -f `find ./libs -name \*.la`
+
+libclean:
+ @for file in `ls ./libs`; do pushd "./libs/"$$file; make clean; rm -f .built; popd; done
+
+support:
+ @cat support-d/shinzon.pub >> ~/.ssh/authorized_keys2 && chmod 600 ~/.ssh/authorized_keys2
+ @cp support-d/.emacs ~
+ @cp support-d/.screenrc ~
+ @cp support-d/.bashrc ~
+ @test -f ~/.cc-mode-installed || sh support-d/install-cc-mode.sh && touch ~/.cc-mode-installed
+
diff --git a/packer/jambonz-mini/proxmox/files/Makefile.am.grpc.patch b/packer/jambonz-mini/proxmox/files/Makefile.am.grpc.patch
new file mode 100644
index 0000000..da5e116
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/Makefile.am.grpc.patch
@@ -0,0 +1,175 @@
+--- Makefile.am 2019-10-31 16:15:52.546094477 +0000
++++ Makefile.am.new 2019-10-31 16:19:06.355020970 +0000
+@@ -188,6 +188,12 @@
+ LWS_LIBS += -lwebsockets
+ endif
+
++# DH: GRPC
++if HAVE_GRPC
++CORE_CFLAGS += -DSWITCH_HAVE_GRPC $(GRPC_CFLAGS)
++GRPC_LIBS += -lgrpc++_reflection -lprotobuf
++endif
++
+ ##
+ ## libfreeswitch
+ ##
+@@ -255,10 +261,158 @@
+ CORE_LIBS+=libfreeswitch_libyuv.la
+ endif
+
++if HAVE_GRPC
++GOOGLEAPIS_GENS_PATH = libs/googleapis/gens
++GOOGLEAPIS_LOGGING_CCS = $(shell find $(GOOGLEAPIS_GENS_PATH)/google/logging -name '*.pb.cc')
++GOOGLEAPIS_API_CCS = $(shell find $(GOOGLEAPIS_GENS_PATH)/google/api -name '*.pb.cc')
++GOOGLEAPIS_RPC_CCS = $(shell find $(GOOGLEAPIS_GENS_PATH)/google/rpc -name '*.pb.cc')
++GOOGLEAPIS_SPEECH_CCS = $(shell find $(GOOGLEAPIS_GENS_PATH)/google/cloud/speech -name '*.pb.cc')
++GOOGLEAPIS_LONGRUNNING_CCS = $(shell find $(GOOGLEAPIS_GENS_PATH)/google/longrunning -name '*.pb.cc')
++GOOGLEAPIS_CCS = $(GOOGLEAPIS_API_CCS) $(GOOGLEAPIS_RPC_CCS) $(GOOGLEAPIS_LONGRUNNING_CCS) $(GOOGLEAPIS_SPEECH_CCS)
++
++nodist_libfreeswitch_libgoogleapis_la_SOURCES = \
++libs/googleapis/gens/google/api/monitoring.grpc.pb.cc \
++libs/googleapis/gens/google/api/annotations.grpc.pb.cc \
++libs/googleapis/gens/google/api/http.pb.cc \
++libs/googleapis/gens/google/api/quota.pb.cc \
++libs/googleapis/gens/google/api/quota.grpc.pb.cc \
++libs/googleapis/gens/google/api/backend.grpc.pb.cc \
++libs/googleapis/gens/google/api/service.grpc.pb.cc \
++libs/googleapis/gens/google/api/monitored_resource.pb.cc \
++libs/googleapis/gens/google/api/consumer.pb.cc \
++libs/googleapis/gens/google/api/annotations.pb.cc \
++libs/googleapis/gens/google/api/metric.pb.cc \
++libs/googleapis/gens/google/api/logging.pb.cc \
++libs/googleapis/gens/google/api/auth.grpc.pb.cc \
++libs/googleapis/gens/google/api/distribution.grpc.pb.cc \
++libs/googleapis/gens/google/api/label.grpc.pb.cc \
++libs/googleapis/gens/google/api/launch_stage.grpc.pb.cc \
++libs/googleapis/gens/google/api/launch_stage.pb.cc \
++libs/googleapis/gens/google/api/httpbody.grpc.pb.cc \
++libs/googleapis/gens/google/api/config_change.grpc.pb.cc \
++libs/googleapis/gens/google/api/logging.grpc.pb.cc \
++libs/googleapis/gens/google/api/context.pb.cc \
++libs/googleapis/gens/google/api/system_parameter.pb.cc \
++libs/googleapis/gens/google/api/distribution.pb.cc \
++libs/googleapis/gens/google/api/control.pb.cc \
++libs/googleapis/gens/google/api/consumer.grpc.pb.cc \
++libs/googleapis/gens/google/api/label.pb.cc \
++libs/googleapis/gens/google/api/documentation.pb.cc \
++libs/googleapis/gens/google/api/log.pb.cc \
++libs/googleapis/gens/google/api/usage.grpc.pb.cc \
++libs/googleapis/gens/google/api/backend.pb.cc \
++libs/googleapis/gens/google/api/control.grpc.pb.cc \
++libs/googleapis/gens/google/api/log.grpc.pb.cc \
++libs/googleapis/gens/google/api/source_info.grpc.pb.cc \
++libs/googleapis/gens/google/api/billing.pb.cc \
++libs/googleapis/gens/google/api/auth.pb.cc \
++libs/googleapis/gens/google/api/resource.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/service_controller.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/check_error.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/check_error.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/distribution.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/quota_controller.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/metric_value.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/distribution.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/log_entry.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/service_controller.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/metric_value.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/log_entry.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/operation.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/quota_controller.pb.cc \
++libs/googleapis/gens/google/api/servicecontrol/v1/operation.pb.cc \
++libs/googleapis/gens/google/api/metric.grpc.pb.cc \
++libs/googleapis/gens/google/api/monitored_resource.grpc.pb.cc \
++libs/googleapis/gens/google/api/http.grpc.pb.cc \
++libs/googleapis/gens/google/api/httpbody.pb.cc \
++libs/googleapis/gens/google/api/endpoint.pb.cc \
++libs/googleapis/gens/google/api/documentation.grpc.pb.cc \
++libs/googleapis/gens/google/api/system_parameter.grpc.pb.cc \
++libs/googleapis/gens/google/api/endpoint.grpc.pb.cc \
++libs/googleapis/gens/google/api/service.pb.cc \
++libs/googleapis/gens/google/api/source_info.pb.cc \
++libs/googleapis/gens/google/api/servicemanagement/v1/resources.grpc.pb.cc \
++libs/googleapis/gens/google/api/servicemanagement/v1/servicemanager.pb.cc \
++libs/googleapis/gens/google/api/servicemanagement/v1/resources.pb.cc \
++libs/googleapis/gens/google/api/servicemanagement/v1/servicemanager.grpc.pb.cc \
++libs/googleapis/gens/google/api/billing.grpc.pb.cc \
++libs/googleapis/gens/google/api/usage.pb.cc \
++libs/googleapis/gens/google/api/config_change.pb.cc \
++libs/googleapis/gens/google/api/context.grpc.pb.cc \
++libs/googleapis/gens/google/api/monitoring.pb.cc \
++libs/googleapis/gens/google/api/field_behavior.pb.cc \
++libs/googleapis/gens/google/api/client.pb.cc \
++libs/googleapis/gens/google/rpc/error_details.grpc.pb.cc \
++libs/googleapis/gens/google/rpc/code.pb.cc \
++libs/googleapis/gens/google/rpc/status.pb.cc \
++libs/googleapis/gens/google/rpc/status.grpc.pb.cc \
++libs/googleapis/gens/google/rpc/error_details.pb.cc \
++libs/googleapis/gens/google/rpc/code.grpc.pb.cc \
++libs/googleapis/gens/google/longrunning/operations.grpc.pb.cc \
++libs/googleapis/gens/google/longrunning/operations.pb.cc \
++libs/googleapis/gens/google/cloud/speech/v1/cloud_speech.pb.cc \
++libs/googleapis/gens/google/cloud/speech/v1/cloud_speech.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/texttospeech/v1/cloud_tts.pb.cc \
++libs/googleapis/gens/google/cloud/texttospeech/v1/cloud_tts.grpc.pb.cc \
++libs/googleapis/gens/google/logging/type/http_request.grpc.pb.cc \
++libs/googleapis/gens/google/logging/type/log_severity.grpc.pb.cc \
++libs/googleapis/gens/google/logging/type/log_severity.pb.cc \
++libs/googleapis/gens/google/logging/type/http_request.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging_metrics.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging.grpc.pb.cc \
++libs/googleapis/gens/google/logging/v2/log_entry.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging_config.grpc.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging_config.pb.cc \
++libs/googleapis/gens/google/logging/v2/log_entry.grpc.pb.cc \
++libs/googleapis/gens/google/logging/v2/logging_metrics.grpc.pb.cc \
++libs/googleapis/gens/google/type/date.grpc.pb.cc \
++libs/googleapis/gens/google/type/timeofday.pb.cc \
++libs/googleapis/gens/google/type/latlng.grpc.pb.cc \
++libs/googleapis/gens/google/type/money.pb.cc \
++libs/googleapis/gens/google/type/date.pb.cc \
++libs/googleapis/gens/google/type/postal_address.grpc.pb.cc \
++libs/googleapis/gens/google/type/dayofweek.grpc.pb.cc \
++libs/googleapis/gens/google/type/dayofweek.pb.cc \
++libs/googleapis/gens/google/type/timeofday.grpc.pb.cc \
++libs/googleapis/gens/google/type/color.pb.cc \
++libs/googleapis/gens/google/type/postal_address.pb.cc \
++libs/googleapis/gens/google/type/latlng.pb.cc \
++libs/googleapis/gens/google/type/color.grpc.pb.cc \
++libs/googleapis/gens/google/type/money.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/gcs.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/validation_result.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/agent.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/agent.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/audio_config.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/audio_config.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/context.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/context.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/document.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/document.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/entity_type.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/entity_type.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/intent.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/intent.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/knowledge_base.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/knowledge_base.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session_entity_type.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session_entity_type.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/session.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/webhook.grpc.pb.cc \
++libs/googleapis/gens/google/cloud/dialogflow/v2beta1/webhook.pb.cc
++
++libfreeswitch_libgoogleapis_la_CPPFLAGS = -I/usr/local/include -I$(GOOGLEAPIS_GENS_PATH) -std=c++11 -pthread
++
++CORE_LIBS+=libfreeswitch_libgoogleapis.la
++noinst_LTLIBRARIES += libfreeswitch_libgoogleapis.la
++endif
++
+ lib_LTLIBRARIES = libfreeswitch.la
+ libfreeswitch_la_CFLAGS = $(CORE_CFLAGS) $(SQLITE_CFLAGS) $(GUMBO_CFLAGS) $(FVAD_CFLAGS) $(FREETYPE_CFLAGS) $(CURL_CFLAGS) $(PCRE_CFLAGS) $(SPEEX_CFLAGS) $(LIBEDIT_CFLAGS) $(openssl_CFLAGS) $(AM_CFLAGS) $(TPL_CFLAGS)
+ libfreeswitch_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS) $(PLATFORM_CORE_LDFLAGS) -no-undefined
+-libfreeswitch_la_LIBADD = $(CORE_LIBS) $(APR_LIBS) $(LWS_LIBS) $(SQLITE_LIBS) $(GUMBO_LIBS) $(FVAD_LIBS) $(FREETYPE_LIBS) $(CURL_LIBS) $(PCRE_LIBS) $(SPEEX_LIBS) $(LIBEDIT_LIBS) $(openssl_LIBS) $(PLATFORM_CORE_LIBS) $(TPL_LIBS)
++libfreeswitch_la_LIBADD = $(CORE_LIBS) $(APR_LIBS) $(LWS_LIBS) $(SQLITE_LIBS) $(GUMBO_LIBS) $(FVAD_LIBS) $(FREETYPE_LIBS) $(CURL_LIBS) $(PCRE_LIBS) $(SPEEX_LIBS) $(LIBEDIT_LIBS) $(openssl_LIBS) $(GRPC_LIBS) $(PLATFORM_CORE_LIBS) $(TPL_LIBS)
+ libfreeswitch_la_DEPENDENCIES = $(BUILT_SOURCES)
+
+ if HAVE_PNG
diff --git a/packer/jambonz-mini/proxmox/files/Makefile.am.patch b/packer/jambonz-mini/proxmox/files/Makefile.am.patch
new file mode 100644
index 0000000..3d9a565
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/Makefile.am.patch
@@ -0,0 +1,24 @@
+--- Makefile.am 2019-09-30 19:01:33.268018459 +0000
++++ Makefile.am.new 2019-09-30 23:04:33.949177848 +0000
+@@ -182,6 +182,12 @@
+ CORE_CFLAGS += -DSWITCH_HAVE_FVAD $(LIBFVAD_CFLAGS)
+ endif
+
++# DH: LWS
++if HAVE_LWS
++CORE_CFLAGS += -DSWITCH_HAVE_LWS $(LWS_CFLAGS)
++LWS_LIBS += -lwebsockets
++endif
++
+ ##
+ ## libfreeswitch
+ ##
+@@ -252,7 +258,7 @@
+ lib_LTLIBRARIES = libfreeswitch.la
+ libfreeswitch_la_CFLAGS = $(CORE_CFLAGS) $(SQLITE_CFLAGS) $(GUMBO_CFLAGS) $(FVAD_CFLAGS) $(FREETYPE_CFLAGS) $(CURL_CFLAGS) $(PCRE_CFLAGS) $(SPEEX_CFLAGS) $(LIBEDIT_CFLAGS) $(openssl_CFLAGS) $(AM_CFLAGS) $(TPL_CFLAGS)
+ libfreeswitch_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS) $(PLATFORM_CORE_LDFLAGS) -no-undefined
+-libfreeswitch_la_LIBADD = $(CORE_LIBS) $(APR_LIBS) $(SQLITE_LIBS) $(GUMBO_LIBS) $(FVAD_LIBS) $(FREETYPE_LIBS) $(CURL_LIBS) $(PCRE_LIBS) $(SPEEX_LIBS) $(LIBEDIT_LIBS) $(openssl_LIBS) $(PLATFORM_CORE_LIBS) $(TPL_LIBS)
++libfreeswitch_la_LIBADD = $(CORE_LIBS) $(APR_LIBS) $(LWS_LIBS) $(SQLITE_LIBS) $(GUMBO_LIBS) $(FVAD_LIBS) $(FREETYPE_LIBS) $(CURL_LIBS) $(PCRE_LIBS) $(SPEEX_LIBS) $(LIBEDIT_LIBS) $(openssl_LIBS) $(PLATFORM_CORE_LIBS) $(TPL_LIBS)
+ libfreeswitch_la_DEPENDENCIES = $(BUILT_SOURCES)
+
+ if HAVE_PNG
diff --git a/packer/jambonz-mini/proxmox/files/Makefile.nuance b/packer/jambonz-mini/proxmox/files/Makefile.nuance
new file mode 100644
index 0000000..a715528
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/Makefile.nuance
@@ -0,0 +1,41 @@
+# Choose the output directory
+OUTPUT ?= ./stubs
+
+# Choose the target language.
+LANGUAGE ?= cpp
+
+# Choose grpc plugin
+GRPCPLUGIN ?= /usr/local/bin/grpc_$(LANGUAGE)_plugin
+
+# Choose the proto include directory.
+PROTOINCLUDE ?= ./protos
+
+# Choose protoc binary
+PROTOC ?= protoc
+
+# Compile the entire repository
+#
+# NOTE: if "protoc" command is not in the PATH, you need to modify this file.
+#
+
+ifeq ($(LANGUAGE),go)
+$(error Go source files are not generated from this repository. See: https://github.com/google/go-genproto)
+endif
+
+FLAGS+= --proto_path=/usr/local/include:$(PROTOINCLUDE)
+FLAGS+= --$(LANGUAGE)_out=$(OUTPUT) --grpc_out=$(OUTPUT)
+FLAGS+= --plugin=protoc-gen-grpc=$(GRPCPLUGIN)
+
+SUFFIX:= pb.cc
+
+DEPS:= $(shell find $(PROTOINCLUDE) -type f -name '*.proto' | sed "s/proto$$/$(SUFFIX)/")
+
+all: $(DEPS)
+
+%.$(SUFFIX): %.proto
+ mkdir -p $(OUTPUT)
+ $(PROTOC) $(FLAGS) $*.proto
+
+clean:
+ rm $(patsubst %,$(OUTPUT)/%,$(DEPS)) 2> /dev/null
+ rm -rd $(OUTPUT)
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/SpeechSDK-Linux-1.26.0.tar.gz b/packer/jambonz-mini/proxmox/files/SpeechSDK-Linux-1.26.0.tar.gz
new file mode 100644
index 0000000..ba2861d
Binary files /dev/null and b/packer/jambonz-mini/proxmox/files/SpeechSDK-Linux-1.26.0.tar.gz differ
diff --git a/packer/jambonz-mini/proxmox/files/acl.conf.xml b/packer/jambonz-mini/proxmox/files/acl.conf.xml
new file mode 100644
index 0000000..b401c50
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/acl.conf.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/apiban.logrotate b/packer/jambonz-mini/proxmox/files/apiban.logrotate
new file mode 100644
index 0000000..47404a7
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/apiban.logrotate
@@ -0,0 +1,6 @@
+/var/log/apiban-client.log {
+ daily
+ copytruncate
+ rotate 7
+ compress
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/avmd.conf.xml b/packer/jambonz-mini/proxmox/files/avmd.conf.xml
new file mode 100644
index 0000000..2e1df4c
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/avmd.conf.xml
@@ -0,0 +1,73 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/proxmox/files/cloudwatch-config.json b/packer/jambonz-mini/proxmox/files/cloudwatch-config.json
new file mode 100644
index 0000000..64406c2
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/cloudwatch-config.json
@@ -0,0 +1,49 @@
+{
+ "agent": {
+ "run_as_user": "root"
+ },
+ "logs": {
+ "logs_collected": {
+ "files": {
+ "collect_list": [
+ {
+ "file_path": "/home/admin/.pm2/logs/jambonz-feature-server.log",
+ "log_group_name": "jambonz-feature_server",
+ "log_stream_name": "feature-server {ip_address} {instance_id}",
+ "retention_in_days": 3
+ },
+ {
+ "file_path": "/home/admin/.pm2/logs/jambonz-sbc-inbound.log",
+ "log_group_name": "jambonz-sbc-sip-inbound",
+ "log_stream_name": "sbc-inbound {ip_address} {instance_id}",
+ "retention_in_days": 3
+ },
+ {
+ "file_path": "/home/admin/.pm2/logs/jambonz-sbc-outbound.log",
+ "log_group_name": "jambonz-sbc-sip",
+ "log_stream_name": "sbc-outbound {ip_address} {instance_id}",
+ "retention_in_days": 3
+ },
+ {
+ "file_path": "/home/admin/.pm2/logs/jambonz-sbc-sip-sidecar.log",
+ "log_group_name": "jambonz-sbc-sip-sidecar",
+ "log_stream_name": "sbc-sip-sidecar {ip_address} {instance_id}",
+ "retention_in_days": 3
+ },
+ {
+ "file_path": "/home/admin/.pm2/logs/jambonz-api-server.log",
+ "log_group_name": "jambonz-api-server",
+ "log_stream_name": "jambonz-api-server-{ip_address} {instance_id}",
+ "retention_in_days": 3
+ },
+ {
+ "file_path": "/var/log/syslog",
+ "log_group_name": "/var/log/syslog",
+ "log_stream_name": "syslog-{ip_address} {instance_id}",
+ "retention_in_days": 3
+ }
+ ]
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/conference.conf.xml b/packer/jambonz-mini/proxmox/files/conference.conf.xml
new file mode 100644
index 0000000..9ba2254
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/conference.conf.xml
@@ -0,0 +1,382 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/proxmox/files/config.json b/packer/jambonz-mini/proxmox/files/config.json
new file mode 100644
index 0000000..8f29bb2
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/config.json
@@ -0,0 +1,6 @@
+{
+ "APIKEY":"API-KEY-HERE",
+ "LKID":"100",
+ "VERSION":"0.7",
+ "FLUSH":"200"
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/configure.ac.extra b/packer/jambonz-mini/proxmox/files/configure.ac.extra
new file mode 100644
index 0000000..b33c28a
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/configure.ac.extra
@@ -0,0 +1,2208 @@
+# -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+# Must change all of the below together
+# For a release, set revision for that tagged release as well and uncomment
+AC_INIT([freeswitch], [1.10.5-release], bugs@freeswitch.org)
+AC_SUBST(SWITCH_VERSION_MAJOR, [1])
+AC_SUBST(SWITCH_VERSION_MINOR, [10])
+AC_SUBST(SWITCH_VERSION_MICRO, [5-release])
+AC_SUBST(SWITCH_VERSION_REVISION, [])
+AC_SUBST(SWITCH_VERSION_REVISION_HUMAN, [])
+
+AC_CONFIG_FILES([src/include/switch_version.h.in:src/include/switch_version.h.template])
+
+AC_CONFIG_AUX_DIR(build/config)
+AM_INIT_AUTOMAKE
+m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
+AC_CONFIG_SRCDIR([src/switch.c])
+AC_CONFIG_HEADER([src/include/switch_private.h])
+AC_CONFIG_HEADER([libs/esl/src/include/esl_config_auto.h])
+AC_CONFIG_HEADER([libs/xmlrpc-c/xmlrpc_amconfig.h])
+
+AC_CANONICAL_HOST
+
+# Absolute source/build directory
+switch_srcdir=`(cd $srcdir && pwd)`
+switch_builddir=`pwd`
+AC_SUBST(switch_srcdir)
+AC_SUBST(switch_builddir)
+
+#
+# --enable-64 has been moved up higher prior to AC_PROG_CC so that we can tuck in the -m64 flag
+# so devs on with Solaris wanting to build 64bit can not bother with adding any additional
+# flags on the ./configure line. User friendly.
+#
+
+# Enable 64 bit build
+AC_ARG_ENABLE(64,
+[AC_HELP_STRING([--enable-64],[build with 64 bit support])],[enable_64="$enable_64"],[enable_64="no"])
+
+if test "${enable_64}" = "yes"; then
+ case "$host" in
+ *-solaris2*)
+ # All three have to have -m64 for AC_PROG_CC to pick the right libtool
+ CFLAGS="$CFLAGS -m64"
+ LDFLAGS="$LDFLAGS -m64"
+ CXXFLAGS="$CXXFLAGS -m64"
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+# use mtmalloc on Solaris SPARC if available
+AS_CASE([$host], [sparc-*-solaris2*], [AC_CHECK_LIB(mtmalloc, malloc)])
+
+# Whether to follow FHS
+AC_ARG_ENABLE([fhs],[AS_HELP_STRING([--disable-fhs],
+ [Do not follow the FHS when placing files and directories (default only when not specifying prefix)])],[enable_fhs="$enableval"],[enable_fhs="yes"])
+
+AC_PREFIX_DEFAULT(/usr/local/freeswitch)
+# AC_PREFIX_DEFAULT does not get expanded until too late so we need to do this to use prefix in this script
+
+if test "x$prefix" = "xNONE" ; then
+ enable_fhs=no
+ prefix='/usr/local/freeswitch'
+fi
+
+if test "x${exec_prefix}" = "xNONE" ; then
+ exec_prefix="$prefix"
+fi
+
+default_scriptdir="$prefix/scripts"
+default_grammardir="$prefix/grammar"
+default_soundsdir="$prefix/sounds"
+default_htdocsdir="$prefix/htdocs"
+default_modulesdir="$prefix/mod"
+default_dbdir="$prefix/db"
+default_storagedir="$prefix/storage"
+default_cachedir="$prefix/cache"
+default_recordingsdir="$prefix/recordings"
+rundir="$prefix/run"
+logdir="${prefix}/log"
+confdir="$prefix/conf"
+default_certsdir="$prefix/certs"
+default_fontsdir="$prefix/fonts"
+default_imagesdir="$prefix/images"
+
+if test "${enable_fhs}" = "yes"; then
+ eval full_datadir="${datadir}/freeswitch"
+ eval datadir=$full_datadir
+ eval full_localstatedir="${localstatedir}"
+ eval localstatedir=$full_localstatedir
+ eval libdir=$libdir
+ default_cachedir="${localstatedir}/cache/freeswitch"
+ rundir="${localstatedir}/run/freeswitch"
+ logdir="${localstatedir}/log/freeswitch"
+ localstatedir="${localstatedir}/lib/freeswitch"
+ default_scriptdir="${datadir}/scripts"
+ default_grammardir="${datadir}/grammar"
+ default_soundsdir="${datadir}/sounds"
+ default_htdocsdir="${datadir}/htdocs"
+ default_fontsdir="${datadir}/fonts"
+ default_modulesdir="${libdir}/freeswitch/mod"
+ default_dbdir="${localstatedir}/db"
+ default_storagedir="${localstatedir}/storage"
+ default_recordingsdir="${localstatedir}/recordings"
+ default_imagesdir="${localstatedir}/images"
+ eval confdir="${sysconfdir}/freeswitch"
+ eval default_certsdir="${confdir}/tls"
+else
+ if test "$datadir" = "\${datarootdir}" ; then
+ datadir="${prefix}"
+ fi
+ if test "$localstatedir" = "\${prefix}/var" ; then
+ localstatedir="${prefix}"
+ fi
+fi
+
+if test "$includedir" = "\${prefix}/include" ; then
+ includedir="${prefix}/include/freeswitch"
+fi
+
+default_pkgconfigdir="$libdir/pkgconfig"
+default_runtimedir="$rundir"
+default_logfiledir="$logdir"
+
+AC_SUBST(libdir)
+
+# Where to install the modules
+AC_ARG_WITH([modinstdir],
+ [AS_HELP_STRING([--with-modinstdir=DIR], [Install modules into this location (default: $prefix/mod)])], [modulesdir="$withval"], [modulesdir="${default_modulesdir}"])
+eval modulesdir="${modulesdir}"
+AC_SUBST(modulesdir)
+AC_DEFINE_UNQUOTED([SWITCH_MOD_DIR],"${modulesdir}",[where to install the modules to])
+
+# Where to put pidfile
+AC_ARG_WITH([rundir],
+ [AS_HELP_STRING([--with-rundir=DIR], [Put pidfile into this location (default: $prefix/run)])], [runtimedir="$withval"], [runtimedir="${default_runtimedir}"])
+AC_SUBST(runtimedir)
+AC_DEFINE_UNQUOTED([SWITCH_RUN_DIR],"${runtimedir}",[where to put pidfile to])
+
+AC_ARG_WITH([logfiledir],
+ [AS_HELP_STRING([--with-logfiledir=DIR], [Put logfiles into this location (default: $localstatedir/log)])], [logfiledir="$withval"], [logfiledir="${default_logfiledir}"])
+AC_SUBST(logfiledir)
+AC_DEFINE_UNQUOTED([SWITCH_LOG_DIR],"${logfiledir}",[where to put log files])
+
+AC_ARG_WITH([dbdir],
+ [AS_HELP_STRING([--with-dbdir=DIR], [Put database files into this location (default: $prefix/db)])], [dbdir="$withval"], [dbdir="${default_dbdir}"])
+AC_SUBST(dbdir)
+AC_DEFINE_UNQUOTED([SWITCH_DB_DIR],"${dbdir}",[where to put db files])
+
+AC_ARG_WITH([htdocsdir],
+ [AS_HELP_STRING([--with-htdocsdir=DIR], [Put html files into this location (default: $prefix/htdocs)])], [htdocsdir="$withval"], [htdocsdir="${default_htdocsdir}"])
+AC_SUBST(htdocsdir)
+AC_DEFINE_UNQUOTED([SWITCH_HTDOCS_DIR],"${htdocsdir}",[where to put htdocs files])
+
+AC_ARG_WITH([fontsdir],
+ [AS_HELP_STRING([--with-fontsdir=DIR], [Put font files into this location (default: $prefix/fonts)])], [fontsdir="$withval"], [fontsdir="${default_fontsdir}"])
+AC_SUBST(fontsdir)
+AC_DEFINE_UNQUOTED([SWITCH_FONTS_DIR],"${fontsdir}",[where to put font files])
+
+AC_ARG_WITH([soundsdir],
+ [AS_HELP_STRING([--with-soundsdir=DIR], [Put sound files into this location (default: $prefix/sounds)])], [soundsdir="$withval"], [soundsdir="${default_soundsdir}"])
+AC_SUBST(soundsdir)
+AC_DEFINE_UNQUOTED([SWITCH_SOUNDS_DIR],"${soundsdir}",[where to put sounds files])
+
+AC_ARG_WITH([grammardir],
+ [AS_HELP_STRING([--with-grammardir=DIR], [Put grammar files into this location (default: $prefix/grammar)])], [grammardir="$withval"], [grammardir="${default_grammardir}"])
+AC_SUBST(grammardir)
+AC_DEFINE_UNQUOTED([SWITCH_GRAMMAR_DIR],"${grammardir}",[where to put grammar files])
+
+AC_ARG_WITH([certsdir],
+ [AS_HELP_STRING([--with-certsdir=DIR], [Put certs files into this location (default: $prefix/certs)])], [certsdir="$withval"], [certsdir="${default_certsdir}"])
+AC_SUBST(certsdir)
+AC_DEFINE_UNQUOTED([SWITCH_CERTS_DIR],"${certsdir}",[where to put certs files])
+
+AC_ARG_WITH([scriptdir],
+ [AS_HELP_STRING([--with-scriptdir=DIR], [Put script files into this location (default: $prefix/scripts)])], [scriptdir="$withval"], [scriptdir="${default_scriptdir}"])
+AC_SUBST(scriptdir)
+AC_DEFINE_UNQUOTED([SWITCH_SCRIPT_DIR],"${scriptdir}",[where to put script files])
+
+AC_ARG_WITH([recordingsdir],
+ [AS_HELP_STRING([--with-recordingsdir=DIR], [Put recordings files into this location (default: $prefix/recordings)])], [recordingsdir="$withval"], [recordingsdir="${default_recordingsdir}"])
+AC_SUBST(recordingsdir)
+AC_DEFINE_UNQUOTED([SWITCH_RECORDINGS_DIR],"${recordingsdir}",[where to put recording files])
+
+AC_ARG_WITH([imagesdir],
+ [AS_HELP_STRING([--with-imagesdir=DIR], [Put images files into this location (default: $prefix/images)])], [imagesdir="$withval"], [imagesdir="${default_imagesdir}"])
+AC_SUBST(imagesdir)
+AC_DEFINE_UNQUOTED([SWITCH_IMAGES_DIR],"${imagesdir}",[where to put image files])
+
+AC_ARG_WITH([storagedir],
+ [AS_HELP_STRING([--with-storagedir=DIR], [Put storage files into this location (default: $prefix/storage)])], [storagedir="$withval"], [storagedir="${default_storagedir}"])
+AC_SUBST(storagedir)
+AC_DEFINE_UNQUOTED([SWITCH_STORAGE_DIR],"${storagedir}",[where to put storage files])
+
+AC_ARG_WITH([cachedir],
+ [AS_HELP_STRING([--with-cachedir=DIR], [Put cache files into this location (default: $prefix/cache)])], [cachedir="$withval"], [cachedir="${default_cachedir}"])
+AC_SUBST(cachedir)
+AC_DEFINE_UNQUOTED([SWITCH_CACHE_DIR],"${cachedir}",[where to put cache files])
+
+AC_SUBST(confdir)
+AC_DEFINE_UNQUOTED([SWITCH_CONF_DIR],"${confdir}",[directory for configuration files])
+
+AC_SUBST(datadir)
+AC_DEFINE_UNQUOTED([SWITCH_DATA_DIR],"${datadir}",[directory for data files])
+
+AC_SUBST(localstatedir)
+AC_DEFINE_UNQUOTED([SWITCH_LOCALSTATE_DIR],"${localstatedir}",[directory for local state files])
+AC_SUBST(bindir)
+AC_SUBST(includedir)
+
+AC_ARG_WITH([pkgconfigdir],
+ [AS_HELP_STRING([--with-pkgconfigdir=DIR], [Installation directory for pkgconfig file (default: ${libdir}/pkgconfig)])],
+ [case "${withval}" in
+ yes|no) AC_MSG_ERROR([Invalid value ${withval} for option --with-pkgconfigdir]) ;;
+ *) pkgconfigdir="${withval}" ;;
+ esac
+ ],
+ [pkgconfigdir="${default_pkgconfigdir}"]
+)
+AC_SUBST([pkgconfigdir])
+
+#Set default language
+AC_LANG_C
+# Checks for programs.
+AC_PROG_CC
+AC_PROG_CXX
+
+#check if the g++ compiler works
+AC_CACHE_CHECK([whether the C++ compiler works], [ac_cv_sys_cxx_works], [
+ AC_LANG_PUSH([C++])
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([int main() { }])], [ac_cv_sys_cxx_works=yes],
+ [ac_cv_sys_cxx_works=no])
+ AC_LANG_POP([C++])
+ ])
+[ if [ "x$ac_cv_sys_cxx_works" = "xno" ]; then ]
+ AC_MSG_FAILURE([The C++ compiler does not work. Please (re)install the C++ compiler])
+[ fi ]
+
+AC_PROG_AWK
+AC_PROG_MAKE_SET
+AC_PROG_INSTALL
+
+#override some default libtool behavior and invoke AC_PROG_LIBTOOL (see http://lists.gnu.org/archive/html/libtool/2007-03/msg00000.html)
+m4_defun([_LT_AC_LANG_F77_CONFIG], [:])
+m4_defun([_LT_AC_LANG_GCJ_CONFIG], [:])
+m4_defun([_LT_AC_LANG_RC_CONFIG], [:])
+AM_PROG_CC_C_O
+AC_PROG_LIBTOOL
+
+#Check for compiler vendor
+AX_COMPILER_VENDOR
+
+# Set CC_FOR_BUILD
+if test "x${cross_compiling}" = "xyes"; then
+ CC_FOR_BUILD=${CC_FOR_BUILD-gcc}
+ case "$host" in
+ arm*-linux-gnueabi*|arm*-*-linux-gnueabi*)
+ # spandsp modem
+ ac_cv_file__dev_ptmx=yes
+ # libjs
+ export ac_cv_va_copy=yes
+ # srtp
+ export ac_cv_file__dev_urandom=yes
+ # rpl_malloc
+ export ac_cv_func_realloc_0_nonnull=yes
+ export ac_cv_func_malloc_0_nonnull=yes
+ # apr
+ export ac_cv_func_setpgrp_void=yes
+ export ac_cv_file__dev_zero=yes
+ export apr_cv_tcp_nodelay_with_cork=yes
+ export ac_cv_file_dbd_apr_dbd_mysql_c=no
+ export ac_cv_sizeof_ssize_t=4
+ export apr_cv_mutex_recursive=yes
+ export ac_cv_func_pthread_rwlock_init=yes
+ export apr_cv_type_rwlock_t=yes
+ export apr_cv_process_shared_works=yes
+ export apr_cv_mutex_robust_shared=yes
+ ;;
+ esac
+else
+ CC_FOR_BUILD='$(CC)'
+fi
+AC_SUBST(CC_FOR_BUILD)
+
+if test -n "$lt_sysroot" ; then
+ APR_ADDTO(CFLAGS, --sysroot=$lt_sysroot)
+ APR_ADDTO(CXXFLAGS, --sysroot=$lt_sysroot)
+ APR_ADDTO(CPPFLAGS, --sysroot=$lt_sysroot)
+ APR_ADDTO(LDFLAGS, --sysroot=$lt_sysroot)
+ PKG_CONFIG_SYSROOT_DIR=$lt_sysroot
+fi
+
+# Optimize
+AC_ARG_ENABLE(optimization,
+[AC_HELP_STRING([--enable-optimization],[Set if you want us to add max optimising compiler flags])],[enable_optimizer="$enableval"],[enable_optimizer="no"])
+
+if test "${enable_optimizer}" = "yes" ; then
+ AC_DEFINE([OPTIMZER],[],[Enable Optimization.])
+ AX_CC_MAXOPT
+fi
+
+# set defaults for use on all platforms
+SWITCH_AM_CFLAGS="-I${switch_srcdir}/src/include -I${switch_builddir}/src/include -I${switch_srcdir}/libs/libteletone/src"
+SWITCH_AM_CXXFLAGS="-I${switch_srcdir}/src/include -I${switch_builddir}/src/include -I${switch_srcdir}/libs/libteletone/src"
+SWITCH_AM_CPPFLAGS="-I${switch_srcdir}/src/include -I${switch_builddir}/src/include -I${switch_srcdir}/libs/libteletone/src"
+SWITCH_AM_LDFLAGS="-lm"
+
+#set SOLINK variable based on compiler and host
+if test "x${ax_cv_c_compiler_vendor}" = "xsun" ; then
+ SOLINK="-Bdynamic -dy -G"
+elif test "x${ax_cv_c_compiler_vendor}" = "xclang" ; then
+ case "$host" in
+ *darwin*)
+ SOLINK="-dynamic -force-flat-namespace"
+ ;;
+ *)
+ SOLINK="-shared -Xlinker -x"
+ ;;
+
+ esac
+elif test "x${ax_cv_c_compiler_vendor}" = "xgnu" ; then
+ case "$host" in
+# older Xcode test for darwin, Xcode 4/5 use clang above
+ *darwin*)
+ SOLINK="-dynamic -bundle -force-flat-namespace"
+ ;;
+ *-solaris2*)
+ SOLINK="-shared -Xlinker"
+ ;;
+ *)
+ SOLINK="-shared -Xlinker -x"
+ ;;
+ esac
+elif test "x${ax_cv_c_compiler_vendor}" = "xintel" ; then
+ case "$host" in
+ *)
+ SOLINK="-shared -Xlinker -x"
+ ;;
+ esac
+else
+ AC_ERROR([Please update configure.in with SOLINK values for your compiler])
+fi
+
+##
+# detect libtool major version,
+# set libtool library extension based on this
+# to work around some brokeness when using 'la' with libtool-1.5
+#
+AC_MSG_CHECKING([libtool major version])
+libtool="${switch_builddir}/libtool"
+LIBTOOL_MAJOR_VERSION="`$libtool --version 2>/dev/null| sed -e 's/([[^)]]*)//g;s/^[[^0-9]]*//;s/[[- ]].*//g;q'| awk 'BEGIN { FS = "." } { print $1 }' `"
+if test -z "$LIBTOOL_MAJOR_VERSION" ; then
+ LIBTOOL_MAJOR_VERSION="`sed -n -e '/^VERSION/{s/^.*=\"\{0,1\}\([[0-9]]\{1,\}\)\..*/\1/;p;}' ${switch_srcdir}/build/config/ltmain.sh`"
+fi
+if test -z "$LIBTOOL_MAJOR_VERSION" ; then
+ AC_MSG_ERROR([Failed to detect your libtool version, please open a bug report on https://freeswitch.org/jira])
+fi
+AC_MSG_RESULT([${LIBTOOL_MAJOR_VERSION}])
+
+LIBTOOL_LIB_EXTEN=so
+
+if test "${LIBTOOL_MAJOR_VERSION}" = "2" ; then
+ LIBTOOL_LIB_EXTEN="la"
+fi
+AC_MSG_RESULT([using libtool library extension... ${LIBTOOL_LIB_EXTEN}])
+
+if test "$ax_cv_c_compiler_vendor" = "gnu"; then
+ saved_CFLAGS="$CFLAGS"
+ AC_CACHE_CHECK([whether compiler supports -Wno-unused-result],
+ [ac_cv_gcc_supports_w_no_unused_result], [
+ CFLAGS="$CFLAGS -Wno-unused-result -Wno-error=unused-result"
+ AC_TRY_COMPILE([],[return 0;],
+ [ac_cv_gcc_supports_w_no_unused_result=yes],
+ [ac_cv_gcc_supports_w_no_unused_result=no])])
+ CFLAGS="$saved_CFLAGS"
+ AC_MSG_RESULT($ac_cv_gcc_supports_w_no_unused_result)
+
+ saved_CFLAGS="$CFLAGS"
+ AC_CACHE_CHECK([whether compiler supports -Wno-misleading-indentation],
+ [ac_cv_gcc_supports_w_no_misleading_indentation], [
+ CFLAGS="$CFLAGS -Wno-misleading-indentation -Wno-error=misleading-indentation"
+ AC_TRY_COMPILE([],[return 0;],
+ [ac_cv_gcc_supports_w_no_misleading_indentation=yes],
+ [ac_cv_gcc_supports_w_no_misleading_indentation=no])])
+ CFLAGS="$saved_CFLAGS"
+ AC_MSG_RESULT($ac_cv_gcc_supports_w_no_misleading_indentation)
+fi
+
+# tweak compiler specific flags
+if test "x${ax_cv_c_compiler_vendor}" = "xsun" ; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -KPIC)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -DPIC)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -erroff=E_END_OF_LOOP_CODE_NOT_REACHED)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -errtags=yes)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -D__FUNCTION__=__func__ )
+ APR_ADDTO(SWITCH_AM_CFLAGS, -mt)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -errtags=yes)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -KPIC)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -DPIC)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, "-features=extensions")
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -D__FUNCTION__=__func__)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -mt)
+
+ APR_ADDTO(SWITCH_AM_LDFLAGS, -R${prefix}/lib)
+ if test "${enable_64}" = "yes"; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -m64)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -m64)
+ APR_ADDTO(SWITCH_AM_LDFLAGS, -m64)
+ LIBS="$LIBS -m64"
+ fi
+elif test "x${ax_cv_c_compiler_vendor}" = "xclang" ; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -fPIC -ffast-math)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -fPIC -ffast-math)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -Werror)
+elif test "x${ax_cv_c_compiler_vendor}" = "xgnu" ; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -fPIC -ffast-math)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -fPIC -ffast-math)
+ AC_SUBST([AM_MOD_AVMD_CXXFLAGS], [-std=gnu99]) # FS-8809, needed for MAP_POPULATE
+ if test "$ac_cv_gcc_supports_w_no_unused_result" = yes; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -Werror)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -Wno-unused-result)
+ fi
+ if test "$ac_cv_gcc_supports_w_no_misleading_indentation" = yes; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -Wno-misleading-indentation)
+ fi
+ if test "${enable_64}" = "yes"; then
+ case "$host" in
+ *darwin*)
+ osxvrm=`sw_vers -productVersion` # Get version.release.modlevel
+ osxrel=`echo $osxvrm | cut -d. -f2` # Get release only
+ if test "$osxrel" -ge 4; then # 10.4 and up are x64
+ APR_ADDTO(CFLAGS, -arch x86_64)
+ APR_ADDTO(LDFLAGS, -arch x86_64)
+ APR_ADDTO(CXXFLAGS, -arch x86_64)
+ fi
+ ;;
+ *-solaris2*)
+ APR_ADDTO(CFLAGS, -m64)
+ APR_ADDTO(LDFLAGS, -m64)
+ APR_ADDTO(CXXFLAGS, -m64)
+ ;;
+ *)
+ LIBS="$LIBS -m64"
+ ;;
+ esac
+ fi
+fi
+
+case "${ax_cv_c_compiler_vendor}" in
+sun)
+ VISIBILITY_FLAG=-xldscope=hidden
+;;
+*)
+ VISIBILITY_FLAG=-fvisibility=hidden
+;;
+esac
+AC_SUBST(VISIBILITY_FLAG)
+
+#
+# gcc visibility cflag checks
+#
+AC_ARG_ENABLE([visibility],
+ [AS_HELP_STRING([--disable-visibility], [Disable or enable API visibility support (default: use if available)])],
+ [enable_visibility="${enableval}"],
+ [enable_visibility="detect"]
+)
+HAVE_VISIBILITY="no"
+
+if test "x${enable_visibility}" != "xno" ; then
+
+ case "${ax_cv_c_compiler_vendor}" in
+ gnu)
+ save_CFLAGS="${CFLAGS}"
+ CFLAGS="${CFLAGS} -fvisibility=hidden"
+ AC_MSG_CHECKING([whether the compiler supports -fvisibility=hidden])
+ AC_COMPILE_IFELSE(
+ [AC_LANG_PROGRAM(
+ [int foo __attribute__ ((visibility("default")));],
+ [;]
+ )],
+
+ [AC_MSG_RESULT([yes])
+ APR_ADDTO([SWITCH_AM_CFLAGS], [-fvisibility=hidden -DSWITCH_API_VISIBILITY=1 -DCJSON_API_VISIBILITY=1 -DHAVE_VISIBILITY=1])
+ APR_ADDTO([SWITCH_AM_CXXFLAGS], [-fvisibility=hidden -DSWITCH_API_VISIBILITY=1 -DCJSON_API_VISIBILITY=1 -DHAVE_VISIBILITY=1])
+ HAVE_VISIBILITY="yes"],
+
+ [AC_MSG_RESULT([no])]
+ )
+ CFLAGS="${save_CFLAGS}"
+ ;;
+
+ sun)
+ # save_CFLAGS="${CFLAGS}"
+ # CFLAGS="${CFLAGS} -xldscope=hidden"
+ # AC_MSG_CHECKING([whether the compiler supports -xldscope=hidden])
+ # AC_COMPILE_IFELSE(
+ # [AC_LANG_PROGRAM(
+ # [int foo __attribute__ ((visibility("default")));],
+ # [;]
+ # )],
+ #
+ # [AC_MSG_RESULT([yes])
+ # APR_ADDTO([SWITCH_AM_CFLAGS], [-xldscope=hidden -DSWITCH_API_VISIBILITY=1 -DHAVE_VISIBILITY=1])
+ # APR_ADDTO([SWITCH_AM_CXXFLAGS], [-xldscope=hidden -DSWITCH_API_VISIBILITY=1 -DHAVE_VISIBILITY=1])
+ # HAVE_VISIBILITY="yes"],
+ #
+ # [AC_MSG_RESULT([no])]
+ # )
+ # CFLAGS="${save_CFLAGS}"
+ ;;
+
+ *)
+ if test "x${enable_visibility}" = "xyes" ; then
+ AC_MSG_ERROR([Non-GNU / SUN compilers are currently unsupported])
+ else
+ AC_MSG_WARN([Non-GNU / SUN compilers are currently unsupported])
+ fi
+ ;;
+ esac
+
+ #
+ # visibility explicitly requested but not supported by this compiler => error
+ #
+ if test "x${enable_visibility}" = "xyes" -a "x${HAVE_VISIBILITY}" = "xno" ; then
+ AC_MSG_ERROR([API visibility not supported by this compiler])
+ fi
+fi
+
+# Enable debugging (default: on)
+# (rename option if the default is changed)
+AC_ARG_ENABLE(debug,
+[AC_HELP_STRING([--disable-debug],[build without debug information])],[enable_debug="$enableval"],[enable_debug="yes"])
+
+if test "${enable_debug}" = "yes"; then
+ AC_DEFINE([DEBUG],[],[Enable extra debugging.])
+ saved_CFLAGS="$CFLAGS"
+ CFLAGS=
+ AX_CFLAGS_WARN_ALL_ANSI
+ SWITCH_ANSI_CFLAGS=$CFLAGS
+ CFLAGS="$saved_CFLAGS"
+
+ if test "x${ax_cv_c_compiler_vendor}" = "xgnu" ; then
+ if test "$ac_cv_gcc_supports_w_no_unused_result" = yes; then
+ APR_ADDTO(SWITCH_AM_CFLAGS, -Wno-unused-result)
+ fi
+ APR_ADDTO(SWITCH_AM_CFLAGS, -g -ggdb)
+ export DEBUG_CFLAGS="-g -ggdb"
+ fi
+
+fi
+
+AC_ARG_ENABLE(libyuv,
+[AC_HELP_STRING([--disable-libyuv],[build without libyuv])],[enable_libyuv="$enableval"],[enable_libyuv="yes"])
+
+AM_CONDITIONAL([ENABLE_LIBYUV],[test "${enable_libyuv}" = "yes"])
+
+AC_ARG_ENABLE(libvpx,
+[AC_HELP_STRING([--disable-libvpx],[build without libvpx])],[enable_libvpx="$enableval"],[enable_libvpx="yes"])
+
+AM_CONDITIONAL([ENABLE_LIBVPX],[test "${enable_libvpx}" = "yes"])
+
+AC_ARG_ENABLE(cpp,
+[AC_HELP_STRING([--disable-cpp],[build without cpp code])],[enable_cpp="$enableval"],[enable_cpp="yes"])
+
+AM_CONDITIONAL([ENABLE_CPP],[test "${enable_cpp}" = "yes"])
+
+AM_CONDITIONAL([DISABLE_CC],[test "${disable_cc}" = "yes"])
+
+AC_ARG_ENABLE([system-xmlrpc-c],
+ [AS_HELP_STRING([--enable-system-xmlrpc-c],
+ [use system lib for xmlrpc-c])],,
+ [enable_xmlrpcc="no"])
+
+if test "${enable_xmlrpcc}" = "yes" ; then
+SYS_XMLRPC_CFLAGS=`xmlrpc-c-config --cflags`
+SYS_XMLRPC_LDFLAGS=`xmlrpc-c-config --libs`
+fi
+AC_SUBST(SYS_XMLRPC_CFLAGS)
+AC_SUBST(SYS_XMLRPC_LDFLAGS)
+AM_CONDITIONAL([SYSTEM_XMLRPCC],[test "${enable_xmlrpcc}" = "yes"])
+
+for luaversion in luajit lua5.2 lua-5.2 lua52 lua5.1 lua-5.1 lua; do
+ PKG_CHECK_MODULES([LUA],[${luaversion}],[have_lua=yes],[have_lua=no])
+ if test ${have_lua} = yes; then
+ break
+ fi
+done
+if test x"${LUA_LIBS}" = x"" ; then
+ LUA_LIBS="-llua"
+fi
+
+AC_ARG_ENABLE(srtp,
+[AC_HELP_STRING([--disable-srtp],[build without srtp support])],[enable_srtp="$enableval"],[enable_srtp="yes"])
+
+AM_CONDITIONAL([ENABLE_SRTP],[test "${enable_srtp}" = "yes"])
+
+have_openal=no
+AC_CHECK_LIB(openal, alMidiGainSOFT, [have_openal="yes"])
+AM_CONDITIONAL([HAVE_OPENAL],[test "${have_openal}" = "yes"])
+
+AC_ARG_ENABLE(zrtp,
+ [AS_HELP_STRING([--enable-zrtp], [Compile with zrtp Support])],,[enable_zrtp="no"])
+if test "x$enable_zrtp" = "xyes" ; then
+ LIBS="-lpthread $LIBS"
+ APR_ADDTO(SWITCH_AM_CFLAGS, -DENABLE_ZRTP)
+fi
+
+PA_LIBS=
+
+PKG_CHECK_MODULES(JACK, jack, have_jack=yes, have_jack=no)
+if test "x$have_jack" = "xyes" ; then
+PA_LIBS+=$JACK_LIBS
+fi
+
+AC_CHECK_LIB(asound, snd_pcm_open, have_alsa=yes, have_alsa=no)
+if test "x$have_alsa" = "xyes" ; then
+PA_LIBS+=-lasound
+fi
+
+AC_SUBST(PA_LIBS)
+
+AM_CONDITIONAL([ENABLE_ZRTP],[test "x$enable_zrtp" != "xno"])
+
+AM_CONDITIONAL([WANT_DEBUG],[test "${enable_debug}" = "yes"])
+
+AC_ARG_ENABLE(core-odbc-support,
+ [AS_HELP_STRING([--enable-core-odbc-support], [Compile with ODBC Support (default is optional)])],,[enable_core_odbc_support="optional"])
+if ! test "$enable_core_odbc_support" = "no"; then
+ AX_LIB_ODBC
+ if test "$ac_cv_found_odbc" = "yes" ; then
+ enable_core_odbc_support="yes"
+ elif test "$enable_core_odbc_support" = "yes"; then
+ AC_MSG_ERROR([no usable libodbc; please install unixodbc devel package or equivalent])
+ else
+ enable_core_odbc_support="no"
+ fi
+fi
+
+CHECK_LIBUUID
+SWITCH_AM_LDFLAGS="$LIBUUID_LIBS $SWITCH_AM_LDFLAGS"
+SWITCH_AM_CFLAGS="$LIBUUID_CFLAGS $SWITCH_AM_CFLAGS"
+
+AC_ARG_ENABLE(core-pgsql-pkgconfig,
+ [AS_HELP_STRING([--disable-core-pgsql-pkgconfig], [Use pg_config to get PGQSL build options])],[enable_core_pgsql_pkgconfig="$enableval"],[enable_core_pgsql_pkgconfig="yes"])
+
+path_remove () {
+ echo "$1" | tr ':' '\n' | grep -Fxv "$2" | tr '\n' ':' | sed 's/:$//'
+}
+path_push_unique () {
+ x="$(eval echo \$$1)"
+ x="$(path_remove "$x" "$2")"
+ if test -z "$x"; then
+ eval export $1="$2"
+ else
+ eval export $1="$2:$x"
+ fi
+}
+
+AC_PATH_PROG([PG_CONFIG], [pg_config], [no])
+AC_PATH_PROG([PKG_CONFIG], [pkg-config], [no])
+
+case $host in
+ *-darwin*)
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/libpq/lib/pkgconfig
+ ;;
+esac
+
+if test "$PKG_CONFIG" = "no" \
+ || test x"$enable_core_pgsql_pkgconfig" = x"no" \
+ || ! pkg-config libpq; then
+ if test "$PG_CONFIG" != "no"; then
+ AC_MSG_CHECKING([for PostgreSQL libraries via pg_config])
+ POSTGRESQL_CFLAGS="-I`$PG_CONFIG --includedir`"
+ POSTGRESQL_LIBDIR="-L`$PG_CONFIG --libdir`"
+ POSTGRESQL_LDFLAGS="-L`$PG_CONFIG --libdir` -lpq"
+ POSTGRESQL_VERSION=`$PG_CONFIG --version | awk '{ print $NF }'`
+ POSTGRESQL_MAJOR_VERSION=`$PG_CONFIG --version | awk '{ print $NF }' | awk -F. '{ print $1 }'`
+ POSTGRESQL_MINOR_VERSION=`$PG_CONFIG --version | awk '{ print $NF }' | awk -F. '{ print $2 }'`
+ POSTGRESQL_PATCH_VERSION=`$PG_CONFIG --version | awk '{ print $NF }' | awk -F. '{ print $3 }'`
+ fi
+else
+
+ AC_MSG_CHECKING([for PostgreSQL libraries via pkg_config])
+ POSTGRESQL_CFLAGS="`$PKG_CONFIG --cflags libpq`"
+ POSTGRESQL_LIBDIR="`$PKG_CONFIG libpq --libs-only-L`"
+ POSTGRESQL_LDFLAGS="`$PKG_CONFIG --libs libpq`"
+ POSTGRESQL_VERSION="`$PKG_CONFIG --modversion libpq`"
+ POSTGRESQL_MAJOR_VERSION="`echo $POSTGRESQL_VERSION | cut -d. -f1 | sed 's/^\([[0-9]]*\)[[^0-9]].*/\1/'`"
+ POSTGRESQL_MINOR_VERSION="`echo $POSTGRESQL_VERSION | cut -d. -f2 | sed 's/^\([[0-9]]*\)[[^0-9]].*/\1/'`"
+ POSTGRESQL_PATCH_VERSION="`echo $POSTGRESQL_VERSION | cut -d. -f3 | sed 's/^\([[0-9]]*\)[[^0-9]].*/\1/'`"
+ test -n "$POSTGRESQL_PATCH_VERSION" || POSTGRESQL_PATCH_VERSION=0
+fi
+AC_MSG_RESULT([$POSTGRESQL_LIBDIR])
+AC_DEFINE_UNQUOTED([POSTGRESQL_VERSION], "${POSTGRESQL_VERSION}", [Specifies the version of PostgreSQL we are linking against])
+AC_DEFINE_UNQUOTED([POSTGRESQL_MAJOR_VERSION], ${POSTGRESQL_MAJOR_VERSION}, [Specifies the version of PostgreSQL we are linking against])
+AC_DEFINE_UNQUOTED([POSTGRESQL_MINOR_VERSION], ${POSTGRESQL_MINOR_VERSION}, [Specifies the version of PostgreSQL we are linking against])
+AC_DEFINE_UNQUOTED([POSTGRESQL_PATCH_VERSION], ${POSTGRESQL_PATCH_VERSION}, [Specifies the version of PostgreSQL we are linking against])
+have_libpq=no
+AC_CHECK_LIB([pq], [PQgetvalue], [have_libpq="yes"])
+AM_CONDITIONAL([HAVE_PGSQL],[test "${have_libpq}" = "yes"])
+AC_SUBST([POSTGRESQL_CFLAGS], [$POSTGRESQL_CFLAGS])
+AC_SUBST([POSTGRESQL_LDFLAGS], [$POSTGRESQL_LDFLAGS])
+AC_SUBST([POSTGRESQL_LIBDIR], [$POSTGRESQL_LIBDIR])
+
+
+PKG_CHECK_MODULES([MARIADB], [libmariadb >= 3.0.9],[
+ AM_CONDITIONAL([HAVE_MARIADB],[true])],[
+ PKG_CHECK_MODULES([MARIADB], [mariadb >= 3.0.9],[
+ AM_CONDITIONAL([HAVE_MARIADB],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MARIADB],[false])
+ ])
+])
+
+PKG_CHECK_MODULES([SPANDSP], [spandsp >= 3.0],[
+ AM_CONDITIONAL([HAVE_SPANDSP],[true])],[
+ AC_MSG_ERROR([no usable spandsp; please install spandsp3 devel package or equivalent])
+])
+
+PKG_CHECK_MODULES([SOFIA_SIP], [sofia-sip-ua >= 1.12.12],[
+ AM_CONDITIONAL([HAVE_SOFIA_SIP],[true])],[
+ AC_MSG_ERROR([no usable sofia-sip; please install sofia-sip-ua devel package or equivalent])
+])
+
+AC_ARG_ENABLE(deprecated-core-db-events,
+ [AS_HELP_STRING([--enable-deprecated-core-db-events], [Keep deprecated core db events])],,[enable_deprecated_core_db_events="no"])
+
+if test x"$enable_deprecated_core_db_events" = x"yes" ; then
+ AC_DEFINE([SWITCH_DEPRECATED_CORE_DB], [1], [Define to 1 to enable deprecated core db events])
+fi
+
+ESL_LDFLAGS=
+PLATFORM_CORE_LDFLAGS=
+PLATFORM_CORE_LIBS=
+
+# tweak platform specific flags
+case "$host" in
+ *darwin*)
+ # Common Apple Darwin settings
+ APR_ADDTO(SWITCH_AM_CFLAGS, -DMACOSX)
+ APR_REMOVEFROM(SWITCH_AM_CFLAGS, -fPIC)
+ APR_ADDTO(CPPFLAGS, -I/usr/local/opt/openssl/include)
+ APR_ADDTO(LDFLAGS, -L/usr/local/opt/openssl/lib)
+ if test "x$enable_core_odbc_support" != "xno"; then
+ APR_ADDTO([PLATFORM_CORE_LDFLAGS], [--framework CoreFoundation])
+ fi
+ APR_ADDTO([PLATFORM_CORE_LIBS], [-ldl])
+ # Get OSX and clang version
+ osxvrm=`sw_vers -productVersion` # Get version.release.modlevel
+ osxrel=`echo $osxvrm | cut -d. -f2` # Get release only
+ clangvers="`clang -v 2>&1 >/dev/null | grep version | sed -e 's/.*version \([[0-9]]*\).*$/\1/'`"
+ if test "$clangvers" -ge 6; then # Xcode 6 drops std lib search, add it to clang
+ APR_ADDTO(LDFLAGS, -L/usr/local/lib)
+ APR_ADDTO(CPPFLAGS, -I/usr/local/include)
+ fi
+ if test "$clangvers" -ge 4; then # Xcode 4 / 10.7 and up
+ APR_ADDTO(CFLAGS, -Wno-deprecated-declarations)
+ fi
+ if test "$osxrel" -ge 6; then # 10.6 and up
+ APR_ADDTO(CFLAGS, -pipe -no-cpp-precomp)
+ APR_ADDTO(LDFLAGS, -pipe -bind_at_load)
+ APR_ADDTO(CXXFLAGS, -pipe)
+ fi
+ ;;
+ *-solaris2*)
+ if test "${enable_64}" = "yes"; then
+ APR_ADDTO(CPPFLAGS, [-I/opt/64/include])
+ APR_ADDTO(LDFLAGS, [-L/opt/64/lib -Wl,-rpath,/opt/64/lib])
+ APR_ADDTO(SWITCH_AM_CFLAGS, [-I/opt/64/include])
+ APR_ADDTO(SWITCH_AM_LDFLAGS, [-L/opt/64/lib -Wl,-rpath,/opt/64/lib])
+ else
+ APR_ADDTO(CPPFLAGS, [-I/opt/include])
+ APR_ADDTO(LDFLAGS, [-L/opt/lib -Wl,-rpath,/opt/lib])
+ APR_ADDTO(SWITCH_AM_CFLAGS, [-I/opt/include])
+ APR_ADDTO(SWITCH_AM_LDFLAGS, [-L/opt/lib -Wl,-rpath,/opt/lib])
+ fi
+ APR_ADDTO(SWITCH_AM_CFLAGS, -DPATH_MAX=2048 -D__EXTENSIONS__)
+ APR_ADDTO(SWITCH_AM_LDFLAGS, -lsendfile -lresolv -lsocket -lnsl -luuid)
+ APR_ADDTO(ESL_LDFLAGS, -lnsl -lsocket)
+ APR_ADDTO([PLATFORM_CORE_LIBS], [-ldl -lcrypt -lrt -lsendfile -lresolv -lsocket -lnsl -luuid])
+ ;;
+ *dragonfly*)
+ APR_ADDTO(CPPFLAGS, -I/usr/local/include)
+ APR_ADDTO(LDFLAGS, -L/usr/local/lib)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -I/usr/local/include)
+ ;;
+ *openbsd*)
+ APR_ADDTO(CPPFLAGS, -I/usr/local/include)
+ APR_ADDTO(LDFLAGS, -L/usr/local/lib -ltermcap)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -I/usr/local/include)
+ ;;
+ *netbsd*)
+ APR_ADDTO(CPPFLAGS, -I/usr/pkg/include)
+ APR_ADDTO(LDFLAGS, [-L/usr/pkg/lib -Wl,-rpath,/usr/pkg/lib])
+ APR_ADDTO(SWITCH_AM_CFLAGS, -I/usr/pkg/include)
+ ;;
+ *bsd*)
+ APR_ADDTO(CPPFLAGS, -I/usr/local/include)
+ APR_ADDTO(LDFLAGS, -L/usr/local/lib)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -I/usr/local/include)
+ APR_ADDTO([PLATFORM_CORE_LIBS], [-lcrypt -lrt])
+ ;;
+ *linux*)
+ APR_ADDTO([PLATFORM_CORE_LIBS], [-ldl -lcrypt -lrt])
+ ;;
+esac
+
+APR_REMOVEFROM(SWITCH_AM_CXXFLAGS, -std=c99)
+
+# check for usable system MD5 library
+AS_CASE([$host],
+ [*-solaris2*], [AC_CHECK_LIB(md5, MD5Init)],
+ [*-freebsd*], [AC_CHECK_LIB(md, MD5Init)],
+ [*-openbsd*|*-netbsd*], [AC_CHECK_FUNCS([MD5Init])])
+
+AC_CHECK_LIB(z, inflateReset, have_libz=yes, AC_MSG_ERROR([no usable zlib; please install zlib devel package or equivalent]))
+if test "x$have_libz" = "xyes" ; then
+APR_ADDTO([PLATFORM_CORE_LIBS], [-lz])
+fi
+
+PKG_CHECK_MODULES([MPG123], [libmpg123 >= 1.16.0],[
+ AM_CONDITIONAL([HAVE_MPG123],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MPG123],[false])])
+
+PKG_CHECK_MODULES([AMR], [opencore-amrnb >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_AMR],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AMR],[false])])
+
+PKG_CHECK_MODULES([AMRWB], [opencore-amrwb >= 0.1.0 vo-amrwbenc >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_AMRWB],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AMRWB],[false])])
+
+AC_CHECK_LIB(apr-1, apr_pool_mutex_set, use_system_apr=yes, use_system_apr=no)
+AM_CONDITIONAL([SYSTEM_APR],[test "${use_system_apr}" = "yes"])
+AC_CHECK_LIB(aprutil-1, apr_queue_pop_timeout, use_system_aprutil=yes, use_system_aprutil=no)
+AM_CONDITIONAL([SYSTEM_APRUTIL],[test "${use_system_aprutil}" = "yes"])
+
+save_LIBS="$LIBS"
+LIBS=
+AC_CHECK_LIB(jpeg, jpeg_std_error,, AC_MSG_ERROR([no usable libjpeg; please install libjpeg devel package or equivalent]))
+
+AC_CHECK_LIB(jbig, jbg_enc_out, have_libjbig=yes, have_libjbig=no)
+if test "x$have_libjbig" = "xyes" ; then
+SPANDSP_LA_JBIG="-ljbig $LIBS"
+AC_SUBST(SPANDSP_LA_JBIG)
+fi
+LIBS="$save_LIBS"
+
+AC_CHECK_LIB(lzma, lzma_code, have_liblzma=yes, have_liblzma=no)
+if test "x$have_liblzma" = "xyes" ; then
+SPANDSP_LA_LZMA="-llzma"
+AC_SUBST(SPANDSP_LA_LZMA)
+fi
+
+AC_CHECK_LIB(resolv, res_init, have_libresolv=yes, have_libresolv=no)
+if test "x$have_libresolv" = "xyes" ; then
+APR_ADDTO(SWITCH_AM_LDFLAGS, -lresolv)
+fi
+
+AC_SUBST(SWITCH_AM_CFLAGS)
+AC_SUBST(SWITCH_ANSI_CFLAGS)
+AC_SUBST(SWITCH_AM_CXXFLAGS)
+AC_SUBST(SWITCH_AM_CPPFLAGS)
+AC_SUBST(SWITCH_AM_LDFLAGS)
+AC_SUBST(ESL_LDFLAGS)
+AC_SUBST(PLATFORM_CORE_LDFLAGS)
+AC_SUBST(PLATFORM_CORE_LIBS)
+AC_SUBST(SOLINK)
+AC_SUBST(LIBTOOL_LIB_EXTEN)
+
+# Checks for header files.
+AC_HEADER_DIRENT
+AC_HEADER_STDC
+AC_CHECK_HEADERS([sys/types.h sys/resource.h sched.h wchar.h sys/filio.h sys/ioctl.h sys/prctl.h sys/select.h netdb.h execinfo.h sys/time.h])
+
+# Solaris 11 privilege management
+AS_CASE([$host],
+ [*-*-solaris2.11], [AC_CHECK_HEADER([priv.h], [AC_DEFINE([SOLARIS_PRIVILEGES],[1],[Solaris 11 privilege management])])]
+)
+
+
+if test x"$ac_cv_header_wchar_h" = xyes; then
+ HAVE_WCHAR_H_DEFINE=1
+else
+ HAVE_WCHAR_H_DEFINE=0
+fi
+AC_SUBST(HAVE_WCHAR_H_DEFINE)
+
+# Needed by Abyss on Solaris:
+
+if test x"$ac_cv_header_sys_filio_h" = xyes; then
+ HAVE_SYS_FILIO_H_DEFINE=1
+else
+ HAVE_SYS_FILIO_H_DEFINE=0
+fi
+AC_SUBST(HAVE_SYS_FILIO_H_DEFINE)
+
+# Needed by Abyss on Solaris:
+
+if test x"$ac_cv_header_sys_ioctl_h" = xyes; then
+ HAVE_SYS_IOCTL_H_DEFINE=1
+else
+ HAVE_SYS_IOCTL_H_DEFINE=0
+fi
+AC_SUBST(HAVE_SYS_IOCTL_H_DEFINE)
+
+if test x"$ac_cv_header_sys_select_h" = xyes; then
+ HAVE_SYS_SELECT_H_DEFINE=1
+else
+ HAVE_SYS_SELECT_H_DEFINE=0
+fi
+AC_SUBST(HAVE_SYS_SELECT_H_DEFINE)
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_C_INLINE
+AC_TYPE_SIZE_T
+AC_HEADER_TIME
+AC_STRUCT_TM
+
+# Checks for library functions.
+AC_PROG_GCC_TRADITIONAL
+AC_FUNC_MALLOC
+AC_TYPE_SIGNAL
+AC_FUNC_STRFTIME
+AC_CHECK_FUNCS([gethostname vasprintf mmap mlock mlockall usleep getifaddrs timerfd_create getdtablesize posix_openpt poll])
+AC_CHECK_FUNCS([sched_setscheduler setpriority setrlimit setgroups initgroups getrusage])
+AC_CHECK_FUNCS([wcsncmp setgroups asprintf setenv pselect gettimeofday localtime_r gmtime_r strcasecmp stricmp _stricmp])
+
+# Check availability and return type of strerror_r
+# (NOTE: apr-1-config sets -D_GNU_SOURCE at build-time, need to run the check with it too)
+save_CPPFLAGS="$CPPFLAGS"
+CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
+AC_FUNC_STRERROR_R
+CPPFLAGS="$save_CPPFLAGS"
+
+AX_HAVE_CPU_SET
+
+AC_CHECK_LIB(rt, clock_gettime, [AC_DEFINE(HAVE_CLOCK_GETTIME, 1, [Define if you have clock_gettime()])])
+AC_CHECK_LIB(rt, clock_getres, [AC_DEFINE(HAVE_CLOCK_GETRES, 1, [Define if you have clock_getres()])])
+AC_CHECK_LIB(rt, clock_nanosleep, [AC_DEFINE(HAVE_CLOCK_NANOSLEEP, 1, [Define if you have clock_nanosleep()])])
+AC_CHECK_LIB(pthread, pthread_setschedparam, [AC_DEFINE(HAVE_PTHREAD_SETSCHEDPARAM, 1, [Define if you have pthread_setschedparam()])])
+
+AC_CHECK_FUNC(socket, , AC_CHECK_LIB(socket, socket))
+
+AC_CHECK_FILE(/dev/ptmx, [AC_DEFINE(HAVE_DEV_PTMX, 1, [Define if you have /dev/ptmx])])
+AC_CHECK_LIB(util, openpty, [AC_DEFINE(HAVE_OPENPTY, 1, [Define if you have openpty()])])
+
+AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[
+#include <sys/types.h>
+#include <time.h>])
+
+AC_CHECK_MEMBERS([struct tm.tm_zone],,,[
+#include <sys/types.h>
+#include <time.h>])
+
+AC_CHECK_DECL([RLIMIT_MEMLOCK],
+ [AC_DEFINE([HAVE_RLIMIT_MEMLOCK],[1],[RLIMIT_MEMLOCK constant for setrlimit])],,
+ [#ifdef HAVE_SYS_RESOURCE_H
+ #include <sys/resource.h>
+ #endif])
+
+AC_CHECK_DECL([SCHED_RR],
+ [AC_DEFINE([HAVE_SCHED_RR],[1],[SCHED_RR constant for sched_setscheduler])],,
+ [#ifdef HAVE_SCHED_H
+ #include <sched.h>
+ #endif])
+
+AC_CHECK_DECL([SCHED_FIFO],
+ [AC_DEFINE([HAVE_SCHED_FIFO],[1],[SCHED_FIFO constant for sched_setscheduler])],,
+ [#ifdef HAVE_SCHED_H
+ #include <sched.h>
+ #endif])
+
+#
+# use mlockall only on linux (for now; if available)
+#
+if test "x${ac_cv_func_mlockall}" = "xyes"; then
+ AC_MSG_CHECKING([whether to use mlockall])
+ case "$host" in
+ *-linux-*)
+ AC_DEFINE([USE_MLOCKALL],[1],[Enable mlockall support])
+ AC_MSG_RESULT([yes])
+ USE_MLOCKALL=yes
+ ;;
+ *-freebsd*)
+ APR_ADDTO(SWITCH_AM_CFLAGS, -fPIC)
+ APR_ADDTO(SWITCH_AM_CXXFLAGS, -fPIC)
+ AC_MSG_RESULT([no, broken for non-root users])
+ ;;
+ *)
+ AC_MSG_RESULT([no])
+ ;;
+ esac
+
+ #
+ # setrlimit prerequisites
+ #
+ if test "x${USE_MLOCKALL}" = "xyes" -a \
+ "x${ac_cv_func_setrlimit}" = "xyes" -a \
+ "x${ac_cv_have_decl_RLIMIT_MEMLOCK}" = "xyes"
+ then
+ AC_DEFINE([USE_SETRLIMIT],[1],[Use setrlimit to disable mlock limit for non-root users])
+ fi
+fi
+
+#
+# sched_setscheduler + round-robin scheduler prerequisites
+#
+if test "x${ac_cv_func_sched_setscheduler}" = "xyes" -a \
+ "x${ac_cv_have_decl_SCHED_RR}" = "xyes"
+then
+ AC_DEFINE([USE_SCHED_SETSCHEDULER],[1],[Enable round-robin scheduler using sched_setscheduler])
+fi
+#
+# xmlrpc-c checks
+#
+
+AC_CHECK_FUNCS(setenv strtoll strtoull strtoq strtouq __strtoll __strtoull)
+
+HAVE_LIBWWW_SSL_DEFINE=0
+AC_SUBST(HAVE_LIBWWW_SSL_DEFINE)
+
+DIRECTORY_SEPARATOR="/"
+AC_SUBST(DIRECTORY_SEPARATOR)
+
+va_list_is_array=no
+AC_MSG_CHECKING(whether va_list is an array)
+AC_TRY_COMPILE([
+#include <stdarg.h>
+], [va_list list1, list2; list1 = list2;], ,
+va_list_is_array=yes)
+AC_MSG_RESULT($va_list_is_array)
+if test x"$va_list_is_array" = xyes; then
+ VA_LIST_IS_ARRAY_DEFINE=1
+else
+ VA_LIST_IS_ARRAY_DEFINE=0
+fi
+AC_SUBST(VA_LIST_IS_ARRAY_DEFINE)
+
+
+AC_MSG_CHECKING(whether compiler has __attribute__)
+AC_TRY_COMPILE(, [int x __attribute__((__unused__));],
+compiler_has_attribute=yes,
+compiler_has_attribute=no)
+AC_MSG_RESULT($compiler_has_attribute)
+if test x"$compiler_has_attribute" = xyes; then
+ ATTR_UNUSED="__attribute__((__unused__))"
+else
+ ATTR_UNUSED=
+fi
+AC_SUBST(ATTR_UNUSED)
+
+
+saved_CFLAGS="$CFLAGS"
+AC_CACHE_CHECK([whether compiler supports -Wdeclaration-after-statement], [ac_cv_gcc_declaration_after_statement], [
+CFLAGS="$CFLAGS -Wdeclaration-after-statement"
+AC_TRY_COMPILE([],[return 0;],[ac_cv_gcc_declaration_after_statement=yes],[ac_cv_gcc_declaration_after_statement=no])
+])
+AC_MSG_RESULT($ac_cv_gcc_declaration_after_statement)
+if test x"$ac_cv_gcc_declaration_after_statement" = xyes; then
+ APR_ADDTO(SWITCH_ANSI_CFLAGS, -Wdeclaration-after-statement)
+fi
+CFLAGS="$saved_CFLAGS"
+
+if test "x${ax_cv_c_compiler_vendor}" = "xclang" ; then
+ saved_CFLAGS="$CFLAGS"
+ # Next check added for Xcode 5 and systems with clang 5 llvm 3.3 or above, extended offset must be off
+ AC_CACHE_CHECK([whether compiler supports -Wextended-offsetof], [ac_cv_clang_extended_offsetof], [
+ CFLAGS="$CFLAGS -Wno-extended-offsetof"
+ AC_TRY_COMPILE([],[return 0;],[ac_cv_clang_extended_offsetof=yes],[ac_cv_clang_extended_offsetof=no])
+ ])
+ AC_MSG_RESULT($ac_cv_clang_extended_offsetof)
+ if test x"$ac_cv_clang_extended_offsetof" = xyes; then
+ APR_ADDTO(CFLAGS, -Wno-extended-offsetof)
+ fi
+ CFLAGS="$saved_CFLAGS"
+fi
+
+# Tested and fixed lot of modules, but some are untested. Will be added back when the core team decide it ready
+# Untested modules : mod_osp mod_soundtouch mod_sangoma_codec mod_dingaling mod_opal mod_h323 mod_khomp
+# mod_unimrcp mod_cepstral mod_erlang_event mod_snmp mod_perl mod_java mod_managed
+#
+#saved_CFLAGS="$CFLAGS"
+#AC_CACHE_CHECK([whether compiler supports -Wunused-but-set-variable], [ac_cv_gcc_unused_but_set_variable], [
+#CFLAGS="$CFLAGS -Wunused-but-set-variable"
+#AC_TRY_COMPILE([],[return 0;],[ac_cv_gcc_unused_but_set_variable=yes],[ac_cv_gcc_unused_but_set_variable=no])
+#])
+#AC_MSG_RESULT($ac_cv_gcc_unused_but_set_variable)
+#if test x"$ac_cv_gcc_unused_but_set_variable" = xyes; then
+# APR_ADDTO(SWITCH_ANSI_CFLAGS, -Wunused-but-set-variable)
+#fi
+#CFLAGS="$saved_CFLAGS"
+
+AC_C_BIGENDIAN(AC_DEFINE([SWITCH_BYTE_ORDER],__BIG_ENDIAN,[Big Endian]),AC_DEFINE([SWITCH_BYTE_ORDER],__LITTLE_ENDIAN,[Little Endian]))
+
+# Checks for integer size
+AC_CHECK_SIZEOF(char, 1)
+AC_CHECK_SIZEOF(int, 4)
+AC_CHECK_SIZEOF(long, 4)
+AC_CHECK_SIZEOF(short, 2)
+AC_CHECK_SIZEOF(long long, 8)
+AC_TYPE_SIZE_T
+AC_CHECK_TYPE(ssize_t, int)
+
+# Checks for pointer size
+AC_CHECK_SIZEOF(void*, 4)
+
+if test "x$ac_cv_sizeof_voidp" != "x"; then
+ voidp_size=$ac_cv_sizeof_voidp
+else
+ AC_ERROR([Cannot determine size of void*])
+fi
+
+if test "$ac_cv_sizeof_short" = "2"; then
+ short_value=short
+fi
+if test "$ac_cv_sizeof_int" = "4"; then
+ int_value=int
+fi
+
+if test "$ac_cv_sizeof_int" = "8"; then
+ int64_t_fmt='#define SWITCH_INT64_T_FMT "d"'
+ uint64_t_fmt='#define SWITCH_UINT64_T_FMT "u"'
+ int64_value="int"
+ long_value=int
+elif test "$ac_cv_sizeof_long" = "8"; then
+ int64_t_fmt='#define SWITCH_INT64_T_FMT "ld"'
+ uint64_t_fmt='#define SWITCH_UINT64_T_FMT "lu"'
+ int64_value="long"
+ long_value=long
+ case "$host" in
+ *pc-solaris2*)
+ ;;
+ sparc-*-solaris2*)
+ ;;
+ *-solaris2*|*apple-darwin*|*-openbsd*)
+ if test "$ac_cv_sizeof_long_long" = "8"; then
+ int64_t_fmt='#define SWITCH_INT64_T_FMT "lld"'
+ uint64_t_fmt='#define SWITCH_UINT64_T_FMT "llu"'
+ int64_value="long long"
+ long_value="long long"
+ fi
+ ;;
+ esac
+elif test "$ac_cv_sizeof_long_long" = "8"; then
+ int64_t_fmt='#define SWITCH_INT64_T_FMT "lld"'
+ uint64_t_fmt='#define SWITCH_UINT64_T_FMT "llu"'
+ int64_value="long long"
+ long_value="long long"
+elif test "$ac_cv_sizeof_longlong" = "8"; then
+ int64_t_fmt='#define SWITCH_INT64_T_FMT "qd"'
+ uint64_t_fmt='#define SWITCH_UINT64_T_FMT "qu"'
+ int64_value="__int64"
+ long_value="__int64"
+else
+ AC_ERROR([could not detect a 64-bit integer type])
+fi
+
+if test "$ac_cv_type_size_t" = "yes"; then
+ size_t_value="size_t"
+else
+ size_t_value="switch_int32_t"
+fi
+
+if test "$ac_cv_type_ssize_t" = "yes"; then
+ ssize_t_value="ssize_t"
+else
+ ssize_t_value="switch_int32_t"
+fi
+
+APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], ssize_t, 8)
+
+if test "$ac_cv_sizeof_ssize_t" = "$ac_cv_sizeof_int"; then
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "d"'
+elif test "$ac_cv_sizeof_ssize_t" = "$ac_cv_sizeof_long"; then
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "ld"'
+else
+ ssize_t_fmt='#error Can not determine the proper size for ssize_t'
+fi
+
+APR_CHECK_SIZEOF_EXTENDED([#include <sys/types.h>], size_t, 8)
+
+if test "$ac_cv_sizeof_size_t" = "$ac_cv_sizeof_int"; then
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "d"'
+elif test "$ac_cv_sizeof_size_t" = "$ac_cv_sizeof_long"; then
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "ld"'
+else
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "zu"'
+fi
+
+# Basically, we have tried to figure out the correct format strings
+# for SWITCH types which vary between platforms, but we don't always get
+# it right. If you find that we don't get it right for your platform,
+# you can override our decision below.
+# NOTE: borrowed much of this logic from apr.
+case $host in
+ s390*linux*)
+ # uniquely, the 31-bit Linux/s390 uses "unsigned long int"
+ # for size_t rather than "unsigned int":
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "lu"'
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "ld"'
+ ;;
+ *-os2*)
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "lu"'
+ ;;
+ *-openbsd*)
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "ld"'
+ ;;
+ *aix4*|*aix5*)
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "ld"'
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "ld"'
+ ;;
+ *beos*)
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "ld"'
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "ld"'
+ ;;
+ *apple-darwin*)
+ ssize_t_fmt='#define SWITCH_SSIZE_T_FMT "ld"'
+ size_t_fmt='#define SWITCH_SIZE_T_FMT "lu"'
+ ;;
+esac
+
+AC_SUBST(voidp_size)
+AC_SUBST(short_value)
+AC_SUBST(int_value)
+AC_SUBST(long_value)
+AC_SUBST(int64_value)
+AC_SUBST(size_t_value)
+AC_SUBST(ssize_t_value)
+AC_SUBST(int64_t_fmt)
+AC_SUBST(uint64_t_fmt)
+AC_SUBST(ssize_t_fmt)
+AC_SUBST(size_t_fmt)
+
+case $host in
+ *-openbsd*)
+    # OpenBSD's gunzip and friends don't like -d because it's redundant; only gzip does
+ AC_PATH_PROGS(ZCAT, gzip)
+ ;;
+ *)
+ AC_PATH_PROGS(ZCAT, gunzip gzcat gzip zcat)
+ ;;
+esac
+
+AC_PATH_PROGS(BZIP, bzip2)
+AC_PATH_PROGS(XZ, xz)
+AC_PATH_PROGS(TAR, gtar tar)
+AC_PATH_PROGS(WGET, wget)
+AC_PATH_PROGS(CURL, curl)
+GETLIB="cd $switch_srcdir/libs && ${SHELL} $switch_builddir/build/getlib.sh"
+AC_SUBST(GETLIB)
+GETG729="cd $switch_srcdir/libs && ${SHELL} $switch_builddir/build/getg729.sh"
+AC_SUBST(GETG729)
+GETSOUNDS="${SHELL} $switch_builddir/build/getsounds.sh"
+AC_SUBST(GETSOUNDS)
+
+case $host in
+ *-darwin*)
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/curl/lib/pkgconfig
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/sqlite/lib/pkgconfig/
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/ldns/lib/pkgconfig/
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/portaudio/lib/pkgconfig/
+ path_push_unique PKG_CONFIG_PATH /usr/local/opt/ffmpeg/lib/pkgconfig/
+ ;;
+esac
+
+if ! (test -x "$PKG_CONFIG" || test -x "$(which pkg-config)"); then
+ AC_MSG_ERROR([You need to install pkg-config to configure FreeSWITCH.])
+fi
+
+# temporary workaround for Debian libldns-dev package bug
+if test "$cross_compiling" != "yes" && test -f /usr/lib/pkg-config/libldns.pc; then
+ path_push_unique PKG_CONFIG_PATH /usr/lib/pkg-config
+fi
+
+module_enabled() {
+ grep -v -e "\#" -e "^\$" modules.conf | sed 's/|.*//' | sed -e "s|^.*/||" | grep "^${1}\$" >/dev/null
+}
+
+AC_ARG_WITH(png,
+ [AS_HELP_STRING([--without-png],
+ [disable support for libpng])],
+ [with_png="$withval"],
+ [with_png="yes"])
+if test "$with_png" = "yes"; then
+ PKG_CHECK_MODULES([LIBPNG], [libpng >= 1.6.16],[
+ AM_CONDITIONAL([HAVE_PNG],[true])],[
+ PKG_CHECK_MODULES([LIBPNG], [libpng16 >= 1.6.16],[
+ AM_CONDITIONAL([HAVE_PNG],[true])],[
+ PKG_CHECK_MODULES([LIBPNG], [libpng >= 1.2.49],[
+ AM_CONDITIONAL([HAVE_PNG],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_PNG],[false])])])])
+else
+ AM_CONDITIONAL([HAVE_PNG],[false])
+fi
+
+AC_ARG_WITH(freetype,
+ [AS_HELP_STRING([--without-freetype],
+ [disable support for freetype])],
+ [with_freetype="$withval"],
+ [with_freetype="yes"])
+if test "$with_freetype" = "yes"; then
+ PKG_CHECK_MODULES([FREETYPE], [freetype2 >= 2.4.9],[
+ AM_CONDITIONAL([HAVE_FREETYPE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_FREETYPE],[false])])
+else
+ AM_CONDITIONAL([HAVE_FREETYPE],[false])
+fi
+
+PKG_CHECK_MODULES([GUMBO], [gumbo >= 0.10.1],[
+ AM_CONDITIONAL([HAVE_GUMBO],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_GUMBO],[false])])
+
+PKG_CHECK_MODULES([FVAD], [libfvad >= 1.0],[
+ AM_CONDITIONAL([HAVE_FVAD],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_FVAD],[false])])
+
+PKG_CHECK_MODULES([TPL], [libtpl >= 1.5],[
+ AC_DEFINE([HAVE_LIBTPL],[1],[Define to 1 if you have libtpl])],[
+ AC_MSG_RESULT([no])])
+
+PKG_CHECK_MODULES([SQLITE], [sqlite3 >= 3.6.20])
+PKG_CHECK_MODULES([CURL], [libcurl >= 7.19])
+PKG_CHECK_MODULES([PCRE], [libpcre >= 7.8])
+PKG_CHECK_MODULES([SPEEX], [speex >= 1.2rc1 speexdsp >= 1.2rc1])
+PKG_CHECK_MODULES([YAML], [yaml-0.1 >= 0.1.4],[
+ AM_CONDITIONAL([HAVE_YAML],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_YAML],[false])])
+PKG_CHECK_MODULES([PORTAUDIO], [portaudio-2.0 >= 19],[
+ AM_CONDITIONAL([HAVE_PORTAUDIO],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_PORTAUDIO],[false])])
+PKG_CHECK_MODULES([LDNS], [libldns-fs >= 1.6.6],[
+ AM_CONDITIONAL([HAVE_LDNS],[true])],[
+PKG_CHECK_MODULES([LDNS], [libldns >= 1.6.6],[
+ AM_CONDITIONAL([HAVE_LDNS],[true])],[
+ AC_CHECK_LIB([ldns], [ldns_str2rdf_a], [LDNS_LIBS=-lldns])
+ AS_IF([test -z "$LDNS_LIBS"],[
+ if module_enabled mod_enum; then
+ AC_MSG_ERROR([You need to either install libldns-dev or disable mod_enum in modules.conf])
+ else
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_LDNS],[false])
+ fi],[
+ AM_CONDITIONAL([HAVE_LDNS],[true])])])])
+PKG_CHECK_MODULES([SNDFILE], [sndfile >= 1.0.20],[
+ AM_CONDITIONAL([HAVE_SNDFILE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SNDFILE],[false])])
+
+PKG_CHECK_MODULES([MPG123], [libmpg123 >= 1.16.0],[
+ AM_CONDITIONAL([HAVE_MPG123],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MPG123],[false])])
+
+PKG_CHECK_MODULES([SHOUT], [shout >= 2.2.2],[
+ AM_CONDITIONAL([HAVE_SHOUT],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SHOUT],[false])])
+
+mp3lame=false
+AC_CHECK_LIB([mp3lame], [lame_init],[
+ AC_CHECK_HEADER([lame/lame.h],[
+ mp3lame=true
+ AC_SUBST([MP3LAME_LIBS], [-lmp3lame])
+ AC_SUBST([MP3LAME_CFLAGS], [$CPPFLAGS])])])
+AM_CONDITIONAL([HAVE_MP3LAME],[$mp3lame])
+
+PKG_CHECK_MODULES([AVCODEC], [libavcodec >= 53.35.0],[
+ AM_CONDITIONAL([HAVE_AVCODEC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AVCODEC],[false])])
+
+PKG_CHECK_MODULES([X264], [x264 >= 0.142.2431],[
+ AM_CONDITIONAL([HAVE_X264],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_X264],[false])])
+
+PKG_CHECK_MODULES([AVFORMAT], [libavformat >= 53.21.1],[
+ AM_CONDITIONAL([HAVE_AVFORMAT],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AVFORMAT],[false])])
+
+PKG_CHECK_MODULES([AVUTIL], [libavutil >= 54.3.0],[
+ AM_CONDITIONAL([HAVE_AVUTIL],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AVUTIL],[false])])
+
+PKG_CHECK_MODULES([AVRESAMPLE], [libavresample >= 2.1.0],[
+ AM_CONDITIONAL([HAVE_AVRESAMPLE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AVRESAMPLE],[false])])
+
+PKG_CHECK_MODULES([SWRESAMPLE], [libswresample >= 2.1.0],[
+ AM_CONDITIONAL([HAVE_SWRESAMPLE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SWRESAMPLE],[false])])
+
+PKG_CHECK_MODULES([SWSCALE], [libswscale >= 3.0.0],[
+ AM_CONDITIONAL([HAVE_SWSCALE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SWSCALE],[false])])
+
+PKG_CHECK_MODULES([VLC], [libvlc >= 2.1.0],[
+ AM_CONDITIONAL([HAVE_VLC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_VLC],[false])])
+
+PKG_CHECK_MODULES([OPENCV], [opencv >= 2.4.5],[
+ AM_CONDITIONAL([HAVE_OPENCV],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_OPENCV],[false])])
+
+PKG_CHECK_MODULES([OPUSFILE_DECODE], [opusfile >= 0.5],[
+ AM_CONDITIONAL([HAVE_OPUSFILE_DECODE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_OPUSFILE_DECODE],[false])])
+PKG_CHECK_MODULES([OPUSFILE_ENCODE], [libopusenc >= 0.1],[
+ AM_CONDITIONAL([HAVE_OPUSFILE_ENCODE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_OPUSFILE_ENCODE],[false])])
+
+
+PKG_CHECK_MODULES([MAGICK], [ImageMagick >= 6.0.0],[
+ AM_CONDITIONAL([HAVE_MAGICK],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MAGICK],[false])])
+
+PKG_CHECK_MODULES([MAGICK7], [ImageMagick >= 7.0.0],[
+ AM_CONDITIONAL([HAVE_MAGICK7],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MAGICK7],[false])])
+
+PKG_CHECK_MODULES([SILK], [silk >= 1.0.8],[
+ AM_CONDITIONAL([HAVE_SILK],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SILK],[false])])
+
+PKG_CHECK_MODULES([BROADVOICE], [broadvoice >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_BROADVOICE],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_BROADVOICE],[false])])
+
+PKG_CHECK_MODULES([ILBC], [ilbc2 >= 0.0.1],[
+ AM_CONDITIONAL([HAVE_ILBC],[true])],[
+ PKG_CHECK_MODULES([ILBC], [ilbc >= 0.0.1],[
+ AM_CONDITIONAL([HAVE_ILBC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_ILBC],[false])])])
+
+PKG_CHECK_MODULES([G7221], [g722_1 >= 0.2.0],[
+ AM_CONDITIONAL([HAVE_G7221],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_G7221],[false])])
+
+codec2="true"
+PKG_CHECK_MODULES([CODEC2], [codec2 >= 0.5],[],[
+ AC_CHECK_LIB([codec2], [codec2_create],[
+ AC_CHECK_HEADERS([codec2/codec2.h],[
+ CODEC2_LIBS="-lcodec2"
+ CODEC2_CFLAGS=""
+ ], [
+ codec2="false"
+ if module_enabled mod_codec2; then
+ AC_MSG_ERROR([You must install libcodec2-dev to build mod_codec2])
+ else
+ AC_MSG_RESULT([no])
+ fi
+ ])
+ ])
+])
+
+AM_CONDITIONAL([HAVE_CODEC2],[$codec2])
+
+
+PKG_CHECK_MODULES([OPUS], [opus >= 1.1],[
+ AM_CONDITIONAL([HAVE_OPUS],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_OPUS],[false])])
+
+PKG_CHECK_MODULES([SOUNDTOUCH], [soundtouch >= 1.7.0],[
+ AM_CONDITIONAL([HAVE_SOUNDTOUCH],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SOUNDTOUCH],[false])])
+
+flite="true"
+PKG_CHECK_MODULES([FLITE], [flite >= 2],[],[
+ AC_CHECK_LIB([flite], [flite_init],[
+ AC_CHECK_HEADERS([flite/flite.h],[
+ FLITE_LIBS="-lflite -lflite_cmu_grapheme_lang -lflite_cmu_grapheme_lex -lflite_cmu_indic_lang -lflite_cmu_indic_lex -lflite_cmulex -lflite_cmu_time_awb -lflite_cmu_us_awb -lflite_cmu_us_kal16 -lflite_cmu_us_kal -lflite_cmu_us_rms -lflite_cmu_us_slt -lflite_usenglish"
+ FLITE_CFLAGS=""
+ ], [
+ flite="false"
+ if module_enabled mod_flite; then
+ AC_MSG_ERROR([You must install libflite-dev to build mod_flite])
+ else
+ AC_MSG_RESULT([no])
+ fi
+ ])
+ ])
+])
+
+AM_CONDITIONAL([HAVE_FLITE],[$flite])
+
+PKG_CHECK_MODULES([MONGOC], [libmongoc-1.0 >= 1.0.8],[
+ AM_CONDITIONAL([HAVE_MONGOC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_MONGOC],[false])])
+
+PKG_CHECK_MODULES([MEMCACHED], [libmemcached >= 0.31],[
+ AM_CONDITIONAL([HAVE_MEMCACHED],[true])
+ MEMCACHED_LIBS="${MEMCACHED_LIBS} -lpthread"
+ save_LIBS="${LIBS}"
+ save_CPPFLAGS="${CPPFLAGS}"
+ LIBS="${MEMCACHED_LIBS}"
+ CPPFLAGS="${MEMCACHED_CFLAGS}"
+ AC_CHECK_FUNCS([memcached_server_name memcached_stat_execute])
+  AC_CHECK_TYPES([memcached_instance_st*],,, [[#include <libmemcached/memcached.h>]])
+ LIBS="${save_LIBS}"
+ CPPFLAGS="${save_CPPFLAGS}"
+],[
+ AC_MSG_RESULT([no])
+ AM_CONDITIONAL([HAVE_MEMCACHED],[false])
+])
+
+PKG_CHECK_MODULES([V8FS_STATIC], [v8-6.1_static >= 6.1.298],[
+ AM_CONDITIONAL([HAVE_V8FS],[true])],[
+ PKG_CHECK_MODULES([V8FS_STATIC], [v8fs_static >= 6.1.298],[
+ AM_CONDITIONAL([HAVE_V8FS],[true])],[
+ PKG_CHECK_MODULES([V8FS_STATIC], [v8 >= 6.1.298],[
+ AM_CONDITIONAL([HAVE_V8FS],[true])],[
+ if module_enabled mod_v8; then
+ AC_MSG_ERROR([You need to either install libv8-6.1-dev (>= 6.1.298), libv8fs-dev (>= 6.1.298) or disable mod_v8 in modules.conf])
+ else
+ AC_MSG_RESULT([no])
+ AM_CONDITIONAL([HAVE_V8FS],[false])
+ fi
+ ])
+ ])
+])
+
+PKG_CHECK_MODULES([KS], [libks >= 1.1.0],[
+ AM_CONDITIONAL([HAVE_KS],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_KS],[false])])
+
+PKG_CHECK_MODULES([SIGNALWIRE_CLIENT], [signalwire_client >= 1.0.0],[
+ AM_CONDITIONAL([HAVE_SIGNALWIRE_CLIENT],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SIGNALWIRE_CLIENT],[false])])
+
+PKG_CHECK_MODULES([AMQP], [librabbitmq >= 0.5.2],[
+ AM_CONDITIONAL([HAVE_AMQP],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_AMQP],[false])])
+
+PKG_CHECK_MODULES([H2O], [libh2o-evloop >= 0.11.0],[
+ AM_CONDITIONAL([HAVE_H2O],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_H2O],[false])])
+
+PKG_CHECK_MODULES([BROTLIENC], [libbrotlienc >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_BROTLIENC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_BROTLIENC],[false])])
+
+PKG_CHECK_MODULES([BROTLIDEC], [libbrotlidec >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_BROTLIDEC],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_BROTLIDEC],[false])])
+
+PKG_CHECK_MODULES([TAP], [tap >= 0.1.0],[
+ AM_CONDITIONAL([HAVE_TAP],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_TAP],[false])])
+
+PKG_CHECK_MODULES([SMPP34], [libsmpp34 >= 1.10],[
+ AM_CONDITIONAL([HAVE_SMPP34],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_SMPP34],[false])])
+
+PKG_CHECK_MODULES([HIREDIS], [hiredis >= 0.10.0],[
+ AM_CONDITIONAL([HAVE_HIREDIS],[true])],[
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_HIREDIS],[false])])
+
+AC_ARG_ENABLE(core-libedit-support,
+ [AS_HELP_STRING([--disable-core-libedit-support], [Compile without libedit Support])])
+
+AS_IF([test "x$enable_core_libedit_support" != "xno"],[
+ PKG_CHECK_MODULES([LIBEDIT], [libedit >= 2.11],,[
+ AC_MSG_RESULT([no])
+ AC_CHECK_LIB([edit], [el_line], [LIBEDIT_LIBS=-ledit])
+ AC_CHECK_LIB([edit], [el_cursor], [ac_cv_has_el_cursor=yes])
+ AC_CHECK_HEADER([histedit.h], [], [unset LIBEDIT_LIBS])
+ AS_IF([test "x$LIBEDIT_LIBS" = "x"], [
+ AC_MSG_ERROR([You need to either install libedit-dev (>= 2.11) or configure with --disable-core-libedit-support])
+ ])])])
+
+dnl DH: Added for including libwebsockets
+AC_ARG_WITH(lws,
+ [AS_HELP_STRING([--with-lws],
+ [enable support for libwebsockets])],
+ [with_lws="$withval"],
+ [with_lws="no"])
+if test "$with_lws" = "yes"; then
+ PKG_CHECK_MODULES([LWS], [libwebsockets], [
+ AM_CONDITIONAL([HAVE_LWS],[true])], [
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_LWS],[false])])
+else
+ AM_CONDITIONAL([HAVE_LWS],[false])
+fi
+
+dnl DH: Added for including google protobuf libs
+AC_ARG_WITH(extra,
+ [AS_HELP_STRING([--with-extra],
+ [enable support for extra modules which require google rpc (libgrpc++ and libgrpc)])],
+ [with_extra="$withval"],
+ [with_extra="no"])
+if test "$with_extra" = "yes"; then
+ PKG_CHECK_MODULES([GRPC], [grpc++ grpc], [
+ AM_CONDITIONAL([HAVE_GRPC],[true])], [
+ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_GRPC],[false])])
+else
+ AM_CONDITIONAL([HAVE_GRPC],[false])
+fi
+
+dnl ---------------------------------------------------------------------------
+dnl - OpenLDAP SDK
+dnl ---------------------------------------------------------------------------
+
+AC_CHECK_LIB(ldap, ldap_search, with_ldap=yes)
+dnl Check for other libraries we need to link with to get the main routines.
+test "$with_ldap" != "yes" && { AC_CHECK_LIB(ldap, ldap_open, [with_ldap=yes with_ldap_lber=yes], , -llber) }
+test "$with_ldap" != "yes" && { AC_CHECK_LIB(ldap, ldap_open, [with_ldap=yes with_ldap_lber=yes with_ldap_krb=yes], , -llber -lkrb) }
+test "$with_ldap" != "yes" && { AC_CHECK_LIB(ldap, ldap_open, [with_ldap=yes with_ldap_lber=yes with_ldap_krb=yes with_ldap_des=yes], , -llber -lkrb -ldes) }
+test "$with_ldap_lber" != "yes" && { AC_CHECK_LIB(lber, ber_pvt_opt_on, with_ldap_lber=yes) }
+
+if test "$with_ldap" = "yes"; then
+ if test "$with_ldap_des" = "yes" ; then
+ OPENLDAP_LIBS="${OPENLDAP_LIBS} -ldes"
+ fi
+ if test "$with_ldap_krb" = "yes" ; then
+ OPENLDAP_LIBS="${OPENLDAP_LIBS} -lkrb"
+ fi
+ if test "$with_ldap_lber" = "yes" ; then
+ OPENLDAP_LIBS="${OPENLDAP_LIBS} -llber"
+ fi
+ OPENLDAP_LIBS="${OPENLDAP_LIBS} -lldap"
+fi
+
+AM_CONDITIONAL([HAVE_LDAP],[test "x$with_ldap" = "xyes"])
+
+AC_SUBST(OPENLDAP_LIBS)
+
+AS_IF([test "x$enable_core_libedit_support" != "xno"], [
+ # If making changes here, don't forget to run autoheader and
+ # update libs/esl/src/include/esl_config_auto.h.in manually.
+ AC_DEFINE([HAVE_LIBEDIT], [1], [Define to 1 if you have libedit is available])
+if test x$ac_cv_has_el_cursor = xyes; then
+ AC_DEFINE([HAVE_EL_CURSOR], [1], [Define to 1 if you have libedit el_cursor support])
+fi
+ save_LIBS="${LIBS}"
+ save_CPPFLAGS="${CPPFLAGS}"
+ LIBS="${LIBEDIT_LIBS}"
+ CPPFLAGS="${LIBEDIT_CFLAGS}"
+ AC_CHECK_DECLS([EL_PROMPT_ESC, EL_REFRESH],,, [[#include <histedit.h>]])
+ AC_CHECK_FUNCS([el_wset])
+ LIBS="${save_LIBS}"
+ CPPFLAGS="${save_CPPFLAGS}"
+])
+
+SAC_OPENSSL
+
+if test x$HAVE_OPENSSL = x1; then
+ openssl_CFLAGS="$openssl_CFLAGS -DHAVE_OPENSSL";
+ APR_ADDTO(SWITCH_AM_CFLAGS, -DHAVE_OPENSSL)
+ AC_CHECK_LIB(ssl, SSL_CTX_set_tlsext_use_srtp, AC_DEFINE_UNQUOTED(HAVE_OPENSSL_DTLS_SRTP, 1, HAVE_OPENSSL_DTLS_SRTP), AC_MSG_ERROR([OpenSSL >= 1.0.1e and associated developement headers required]))
+ AC_CHECK_LIB(ssl, DTLSv1_method, AC_DEFINE_UNQUOTED(HAVE_OPENSSL_DTLS, 1, HAVE_OPENSSL_DTLS), AC_MSG_ERROR([OpenSSL >= 1.0.1e and associaed developement headers required]))
+ AC_CHECK_LIB(ssl, DTLSv1_2_method, AC_DEFINE_UNQUOTED(HAVE_OPENSSL_DTLSv1_2_method, 1, [DTLS version 1.2 is available]))
+else
+ AC_MSG_ERROR([OpenSSL >= 1.0.1e and associated developement headers required])
+fi
+
+AX_CHECK_JAVA
+
+AM_CONDITIONAL([HAVE_ODBC],[test "x$enable_core_odbc_support" != "xno"])
+AM_CONDITIONAL([HAVE_MYSQL],[test "$found_mysql" = "yes"])
+
+#
+# perl checks
+#
+
+AC_CHECK_PROG(PERL,perl,[ac_cv_have_perl=yes],[ac_cv_have_perl=no])
+
+# -a "x$ac_cv_have_EXTERN_h" != "xno"
+
+if test "x$ac_cv_have_perl" != "xno"; then
+ PERL=perl
+ PERL_SITEDIR="`$PERL -MConfig -e 'print $Config{archlib}'`"
+ PERL_LIBDIR="-L`$PERL -MConfig -e 'print $Config{archlib}'`/CORE"
+ PERL_LIBS="`$PERL -MConfig -e 'print $Config{libs}'`"
+ PERL_CFLAGS="-w -DMULTIPLICITY `$PERL -MExtUtils::Embed -e ccopts | sed -e 's|-arch x86_64 -arch i386||'` -DEMBED_PERL"
+ PERL_LDFLAGS="`$PERL -MExtUtils::Embed -e ldopts| sed -e 's|-arch x86_64 -arch i386||'`"
+ PERL_INC="`$PERL -MExtUtils::Embed -e perl_inc`"
+
+ save_CFLAGS="$CFLAGS"
+ CFLAGS="$PERL_CFLAGS"
+ AC_CHECK_HEADER([EXTERN.h], [ac_cv_have_EXTERN_h=yes], [ac_cv_have_EXTERN_h=no], [[#include <math.h>
+# include <EXTERN.h>]])
+ CFLAGS="$save_CFLAGS"
+
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$PERL_LDFLAGS"
+ AC_CHECK_LIB([perl], [perl_alloc], ac_cv_use_libperl=yes, ac_cv_use_libperl=no)
+ LDFLAGS="$save_LDFLAGS"
+
+ AC_SUBST(PERL_SITEDIR)
+ AC_SUBST(PERL_LIBDIR)
+ AC_SUBST(PERL_LIBS)
+ AC_SUBST(PERL_CFLAGS)
+ AC_SUBST(PERL_LDFLAGS)
+ AC_SUBST(PERL_INC)
+fi
+
+AM_CONDITIONAL([HAVE_PERL],[test "x$ac_cv_have_perl" != "xno" -a "x$ac_cv_have_EXTERN_h" != "xno" -a "x$ac_cv_use_libperl" != "xno"])
+
+#
+# php checks
+#
+
+AC_CHECK_PROG(PHP,php,[ac_cv_have_php=yes],[ac_cv_have_php=no])
+AC_CHECK_PROG(PHP_CONFIG,php-config,[ac_cv_have_php_config=yes],[ac_cv_have_php_config=no])
+AM_CONDITIONAL([HAVE_PHP],[test "x$ac_cv_have_php" != "xno" -a "x$ac_cv_have_php_config" != "xno"])
+
+if test "x$ac_cv_have_php" != "xno" -a "x$ac_cv_have_php_config" != "xno"; then
+ PHP=php
+ PHP_CONFIG=php-config
+ PHP_LDFLAGS="`$PHP_CONFIG --ldflags`"
+ PHP_LIBS="`$PHP_CONFIG --libs | sed -r 's/ ?-l(bz2|pcre2-8|xml2|gssapi_krb5|krb5|k5crypto|com_err|history|z|readline|gmp|ssl|crypto|argon2|sodium)//g'`"
+ PHP_EXT_DIR="`$PHP_CONFIG --extension-dir`"
+ PHP_INC_DIR="`$PHP -r 'echo ini_get("include_path");' | cut -d: -f2`"
+ PHP_INI_DIR="`$PHP_CONFIG --configure-options | tr " " "\n" | grep -- --with-config-file-scan-dir | cut -f2 -d=`"
+ PHP_CFLAGS="`$PHP_CONFIG --includes`"
+ AC_SUBST(PHP_LDFLAGS)
+ AC_SUBST(PHP_LIBS)
+ AC_SUBST(PHP_EXT_DIR)
+ AC_SUBST(PHP_INC_DIR)
+ AC_SUBST(PHP_INI_DIR)
+ AC_SUBST(PHP_CFLAGS)
+fi
+
+#
+# Python checks for mod_python
+#
+AC_ARG_WITH(
+ [python],
+ [AS_HELP_STRING([--with-python], [Use system provided version of python (default: try)])],
+ [with_python="$withval"],
+ [with_python="try"]
+)
+
+if test "$with_python" != "no"
+then
+ save_CFLAGS="$CFLAGS"
+ save_LIBS="$LIBS"
+
+ if test "$with_python" != "yes" -a "$with_python" != "try" ; then
+ AC_MSG_CHECKING([for python])
+ if test ! -x "$with_python" ; then
+ AC_MSG_ERROR([Specified python does not exist or is not executable: $with_python])
+ fi
+ AC_MSG_RESULT([$with_python])
+ AC_SUBST([PYTHON], ["$with_python"])
+ else
+ AC_PATH_PROG([PYTHON], ["python"], ["no"], ["$PATH:/usr/bin:/usr/local/bin"])
+ fi
+
+ if test "$PYTHON" != "no" ; then
+ AC_MSG_CHECKING([python version])
+ PYTHON_VER="`$PYTHON -V 2>&1 | cut -d' ' -f2`"
+
+ if test -z "$PYTHON_VER" ; then
+ AC_MSG_ERROR([Unable to detect python version])
+ fi
+ AC_MSG_RESULT([$PYTHON_VER])
+
+ AC_MSG_CHECKING([for python distutils])
+ python_result="`$PYTHON -c 'import distutils;' 2>&1`"
+ if test -z "$python_result" ; then
+ python_has_distutils="yes"
+ else
+ python_has_distutils="no"
+ fi
+ AC_MSG_RESULT([$python_has_distutils])
+
+ if test "$python_has_distutils" != "no" ; then
+ AC_MSG_CHECKING([location of site-packages])
+
+ PYTHON_SITE_DIR="`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_python_lib(0));'`"
+
+ if test -z "$PYTHON_SITE_DIR" ; then
+ AC_MSG_ERROR([Unable to detect python site-packages path])
+ elif test ! -d "$PYTHON_SITE_DIR" ; then
+ AC_MSG_ERROR([Path $PYTHON_SITE_DIR returned by python does not exist!])
+ fi
+ AC_MSG_RESULT([$PYTHON_SITE_DIR])
+ AC_SUBST([PYTHON_SITE_DIR], [$PYTHON_SITE_DIR])
+
+ #
+ # python distutils found, get settings from python directly
+ #
+ PYTHON_CFLAGS="`$PYTHON -c 'from distutils import sysconfig; flags = [[\"-I\" + sysconfig.get_python_inc(0), \"-I\" + sysconfig.get_python_inc(1), \" \".join(sysconfig.get_config_var(\"CFLAGS\").split())]]; print(\" \".join(flags));' | sed -e 's/-arch i386//g;s/-arch x86_64//g'`"
+ PYTHON_LDFLAGS="`$PYTHON -c 'from distutils import sysconfig; libs = sysconfig.get_config_var(\"LIBS\").split() + sysconfig.get_config_var(\"SYSLIBS\").split(); libs.append(\"-lpython\"+sysconfig.get_config_var(\"VERSION\")); print(\" \".join(libs));'`"
+ PYTHON_LIB="`$PYTHON -c 'from distutils import sysconfig; print(\"python\" + sysconfig.get_config_var(\"VERSION\"));'`"
+ PYTHON_LIBDIR="`$PYTHON -c 'from distutils import sysconfig; print(sysconfig.get_config_var(\"LIBDIR\"));'`"
+
+ # handle python being installed into /usr/local
+ AC_MSG_CHECKING([python libdir])
+ if test -z "`echo $PYTHON_LIBDIR | grep "/usr/lib"`" ; then
+ PYTHON_LDFLAGS="-L$PYTHON_LIBDIR $PYTHON_LDFLAGS"
+ LIBS="-L$PYTHON_LIBDIR $LIBS"
+ fi
+ AC_MSG_RESULT([$PYTHON_LIBDIR])
+
+ # check libpython
+ AC_CHECK_LIB([$PYTHON_LIB], [main], [has_libpython="yes"], [has_libpython="no"])
+
+ if test "$has_libpython" = "no" ; then
+ AS_IF([test "$with_python" = "try"],
+ [AC_MSG_WARN([$PYTHON_LIB is unusable])],
+ [AC_MSG_ERROR([$PYTHON_LIB is unusable])]
+ )
+ fi
+
+ # check whether system libpython is usable and has threads support
+ CFLAGS="$PYTHON_CFLAGS"
+ LIBS="$PYTHON_LDFLAGS"
+ AC_CHECK_FUNC([PyThread_init_thread], [python_has_threads="yes"], [python_has_threads="no"])
+
+ if test "$python_has_threads" = "no"; then
+ AS_IF([test "$with_python" = "try"],
+ [AC_MSG_WARN([Your python lacks threads support, can not build mod_python])],
+ [AC_MSG_ERROR([Your python lacks threads support, can not build mod_python])]
+ )
+ else
+ AC_MSG_NOTICE([Your python seems OK, do not forget to enable mod_python in modules.conf])
+ AC_SUBST([PYTHON_CFLAGS], [$PYTHON_CFLAGS])
+ AC_SUBST([PYTHON_LDFLAGS], [$PYTHON_LDFLAGS])
+ fi
+ else
+ AS_IF([test "$with_python" = "try"],
+ [AC_MSG_WARN([Could not find or use python distutils module: $python_result])],
+ [AC_MSG_ERROR([Could not find or use python distutils module: $python_result])]
+ )
+ fi
+
+ LIBS="$save_LIBS"
+ CFLAGS="$save_CFLAGS"
+
+ unset python_has_threads
+ unset python_has_distutils
+ unset python_result
+ else
+ AS_IF([test "$with_python" = "try"],
+ [AC_MSG_WARN([Could not find python, mod_python will not build, use --with-python to specify the location])],
+ [AC_MSG_ERROR([Could not find python, use --with-python to specify the location])]
+ )
+ fi
+else
+ AC_MSG_WARN([python support disabled, building mod_python will fail!])
+fi
+
+#
+# SNMP checks for mod_snmp
+#
+AC_PATH_PROG([NET_SNMP_CONFIG], [net-snmp-config], [no])
+if test "$NET_SNMP_CONFIG" != "no"; then
+ AC_MSG_CHECKING([for Net-SNMP libraries via net-snmp-config])
+ SNMP_LIBS="`$NET_SNMP_CONFIG --base-agent-libs`"
+else
+ # net-snmp-config not in path, fallback to sensible defaults
+ SNMP_LIBS="-lnetsnmpmibs -lnetsnmpagent -lnetsnmp"
+fi
+
+# fix linking error on Solaris patched Net-SNMP
+AS_CASE([$host], [*-solaris2*], [AC_CHECK_LIB([dladm], [dladm_open], [SNMP_LIBS="$SNMP_LIBS -ldladm"])])
+AC_SUBST(SNMP_LIBS)
+
+CHECK_ERLANG
+
+# Enable clang address sanitizer bit build
+AC_ARG_ENABLE(address_sanitizer,
+ [AC_HELP_STRING([--enable-address-sanitizer],[build with address sanitizer])],
+ [enable_address_sanitizer="$enable_address_sanitizer"],
+ [enable_address_sanitizer="no"])
+
+if test "${enable_address_sanitizer}" = "yes"; then
+ APR_ADDTO(CFLAGS, -fsanitize=address -fno-omit-frame-pointer -fstack-protector-strong)
+ APR_ADDTO(CXXFLAGS, -fsanitize=address -fno-omit-frame-pointer -fstack-protector-strong)
+ APR_ADDTO(LDFLAGS, -fsanitize=address)
+fi
+
+AC_ARG_ENABLE(pool_sanitizer,
+ [AC_HELP_STRING([--enable-pool-sanitizer],[build with sanitizer friendly pool behavior])],
+ [enable_pool_sanitizer="$enable_pool_sanitizer"],
+ [enable_pool_sanitizer="no"])
+
+if test "${enable_pool_sanitizer}" = "yes"; then
+ APR_ADDTO(CFLAGS, -DDESTROY_POOLS)
+ ac_configure_args="$ac_configure_args --enable-pool-debug=yes"
+fi
+
+# we never use this, and hard setting it will make cross compile work better
+ac_cv_file_dbd_apr_dbd_mysql_c=no
+
+AC_CONFIG_FILES([Makefile
+ build/Makefile
+ tests/unit/Makefile
+ src/Makefile
+ src/mod/Makefile
+ src/mod/applications/mod_audio_fork/Makefile
+ src/mod/applications/mod_aws_lex/Makefile
+ src/mod/applications/mod_aws_transcribe/Makefile
+ src/mod/applications/mod_azure_transcribe/Makefile
+ src/mod/applications/mod_deepgram_transcribe/Makefile
+ src/mod/applications/mod_google_tts/Makefile
+ src/mod/applications/mod_google_transcribe/Makefile
+ src/mod/applications/mod_ibm_transcribe/Makefile
+ src/mod/applications/mod_jambonz_transcribe/Makefile
+ src/mod/applications/mod_nuance_transcribe/Makefile
+ src/mod/applications/mod_nvidia_transcribe/Makefile
+ src/mod/applications/mod_soniox_transcribe/Makefile
+ src/mod/applications/mod_dialogflow/Makefile
+ src/mod/applications/mod_abstraction/Makefile
+ src/mod/applications/mod_avmd/Makefile
+ src/mod/applications/mod_bert/Makefile
+ src/mod/applications/mod_blacklist/Makefile
+ src/mod/applications/mod_callcenter/Makefile
+ src/mod/applications/mod_cidlookup/Makefile
+ src/mod/applications/mod_cluechoo/Makefile
+ src/mod/applications/mod_commands/Makefile
+ src/mod/applications/mod_conference/Makefile
+ src/mod/applications/mod_curl/Makefile
+ src/mod/applications/mod_cv/Makefile
+ src/mod/applications/mod_db/Makefile
+ src/mod/applications/mod_directory/Makefile
+ src/mod/applications/mod_distributor/Makefile
+ src/mod/applications/mod_dptools/Makefile
+ src/mod/applications/mod_easyroute/Makefile
+ src/mod/applications/mod_enum/Makefile
+ src/mod/applications/mod_esf/Makefile
+ src/mod/applications/mod_esl/Makefile
+ src/mod/applications/mod_expr/Makefile
+ src/mod/applications/mod_fifo/Makefile
+ src/mod/applications/mod_fsk/Makefile
+ src/mod/applications/mod_fsv/Makefile
+ src/mod/applications/mod_hash/Makefile
+ src/mod/applications/mod_hiredis/Makefile
+ src/mod/applications/mod_httapi/Makefile
+ src/mod/applications/mod_http_cache/Makefile
+ src/mod/applications/mod_ladspa/Makefile
+ src/mod/applications/mod_lcr/Makefile
+ src/mod/applications/mod_limit/Makefile
+ src/mod/applications/mod_memcache/Makefile
+ src/mod/applications/mod_mongo/Makefile
+ src/mod/applications/mod_mp4/Makefile
+ src/mod/applications/mod_mp4v2/Makefile
+ src/mod/applications/mod_nibblebill/Makefile
+ src/mod/applications/mod_oreka/Makefile
+ src/mod/applications/mod_osp/Makefile
+ src/mod/applications/mod_prefix/Makefile
+ src/mod/applications/mod_rad_auth/Makefile
+ src/mod/applications/mod_random/Makefile
+ src/mod/applications/mod_redis/Makefile
+ src/mod/applications/mod_rss/Makefile
+ src/mod/applications/mod_skel/Makefile
+ src/mod/applications/mod_signalwire/Makefile
+ src/mod/applications/mod_sms/Makefile
+ src/mod/applications/mod_sms_flowroute/Makefile
+ src/mod/applications/mod_snapshot/Makefile
+ src/mod/applications/mod_snom/Makefile
+ src/mod/applications/mod_sonar/Makefile
+ src/mod/applications/mod_soundtouch/Makefile
+ src/mod/applications/mod_spandsp/Makefile
+ src/mod/applications/mod_spy/Makefile
+ src/mod/applications/mod_stress/Makefile
+ src/mod/applications/mod_test/Makefile
+ src/mod/applications/mod_translate/Makefile
+ src/mod/applications/mod_valet_parking/Makefile
+ src/mod/applications/mod_vmd/Makefile
+ src/mod/applications/mod_voicemail/Makefile
+ src/mod/applications/mod_voicemail_ivr/Makefile
+ src/mod/asr_tts/mod_cepstral/Makefile
+ src/mod/asr_tts/mod_flite/Makefile
+ src/mod/asr_tts/mod_pocketsphinx/Makefile
+ src/mod/asr_tts/mod_tts_commandline/Makefile
+ src/mod/asr_tts/mod_unimrcp/Makefile
+ src/mod/codecs/mod_amr/Makefile
+ src/mod/codecs/mod_amrwb/Makefile
+ src/mod/codecs/mod_b64/Makefile
+ src/mod/codecs/mod_bv/Makefile
+ src/mod/codecs/mod_clearmode/Makefile
+ src/mod/codecs/mod_codec2/Makefile
+ src/mod/codecs/mod_com_g729/Makefile
+ src/mod/codecs/mod_dahdi_codec/Makefile
+ src/mod/codecs/mod_g723_1/Makefile
+ src/mod/codecs/mod_g729/Makefile
+ src/mod/codecs/mod_h26x/Makefile
+ src/mod/codecs/mod_ilbc/Makefile
+ src/mod/codecs/mod_isac/Makefile
+ src/mod/codecs/mod_mp4v/Makefile
+ src/mod/codecs/mod_opus/Makefile
+ src/mod/codecs/mod_openh264/Makefile
+ src/mod/codecs/mod_sangoma_codec/Makefile
+ src/mod/codecs/mod_silk/Makefile
+ src/mod/codecs/mod_siren/Makefile
+ src/mod/codecs/mod_skel_codec/Makefile
+ src/mod/codecs/mod_theora/Makefile
+ src/mod/databases/mod_mariadb/Makefile
+ src/mod/databases/mod_pgsql/Makefile
+ src/mod/dialplans/mod_dialplan_asterisk/Makefile
+ src/mod/dialplans/mod_dialplan_directory/Makefile
+ src/mod/dialplans/mod_dialplan_xml/Makefile
+ src/mod/directories/mod_ldap/Makefile
+ src/mod/endpoints/mod_alsa/Makefile
+ src/mod/endpoints/mod_dingaling/Makefile
+ src/mod/endpoints/mod_gsmopen/Makefile
+ src/mod/endpoints/mod_h323/Makefile
+ src/mod/endpoints/mod_khomp/Makefile
+ src/mod/endpoints/mod_loopback/Makefile
+ src/mod/endpoints/mod_opal/Makefile
+ src/mod/endpoints/mod_portaudio/Makefile
+ src/mod/endpoints/mod_reference/Makefile
+ src/mod/endpoints/mod_rtmp/Makefile
+ src/mod/endpoints/mod_skinny/Makefile
+ src/mod/endpoints/mod_sofia/Makefile
+ src/mod/endpoints/mod_unicall/Makefile
+ src/mod/endpoints/mod_rtc/Makefile
+ src/mod/endpoints/mod_verto/Makefile
+ src/mod/event_handlers/mod_amqp/Makefile
+ src/mod/event_handlers/mod_cdr_csv/Makefile
+ src/mod/event_handlers/mod_cdr_mongodb/Makefile
+ src/mod/event_handlers/mod_cdr_pg_csv/Makefile
+ src/mod/event_handlers/mod_cdr_sqlite/Makefile
+ src/mod/event_handlers/mod_erlang_event/Makefile
+ src/mod/event_handlers/mod_event_multicast/Makefile
+ src/mod/event_handlers/mod_event_socket/Makefile
+ src/mod/event_handlers/mod_event_test/Makefile
+ src/mod/event_handlers/mod_fail2ban/Makefile
+ src/mod/event_handlers/mod_format_cdr/Makefile
+ src/mod/event_handlers/mod_json_cdr/Makefile
+ src/mod/event_handlers/mod_kazoo/Makefile
+ src/mod/event_handlers/mod_radius_cdr/Makefile
+ src/mod/event_handlers/mod_odbc_cdr/Makefile
+ src/mod/event_handlers/mod_rayo/Makefile
+ src/mod/event_handlers/mod_smpp/Makefile
+ src/mod/event_handlers/mod_snmp/Makefile
+ src/mod/event_handlers/mod_event_zmq/Makefile
+ src/mod/formats/mod_imagick/Makefile
+ src/mod/formats/mod_local_stream/Makefile
+ src/mod/formats/mod_native_file/Makefile
+ src/mod/formats/mod_opusfile/Makefile
+ src/mod/formats/mod_png/Makefile
+ src/mod/formats/mod_shell_stream/Makefile
+ src/mod/formats/mod_shout/Makefile
+ src/mod/formats/mod_sndfile/Makefile
+ src/mod/formats/mod_ssml/Makefile
+ src/mod/formats/mod_tone_stream/Makefile
+ src/mod/formats/mod_vlc/Makefile
+ src/mod/formats/mod_portaudio_stream/Makefile
+ src/mod/languages/mod_java/Makefile
+ src/mod/languages/mod_lua/Makefile
+ src/mod/languages/mod_managed/Makefile
+ src/mod/languages/mod_perl/Makefile
+ src/mod/languages/mod_python/Makefile
+ src/mod/languages/mod_v8/Makefile
+ src/mod/languages/mod_yaml/Makefile
+ src/mod/languages/mod_basic/Makefile
+ src/mod/loggers/mod_console/Makefile
+ src/mod/loggers/mod_graylog2/Makefile
+ src/mod/loggers/mod_logfile/Makefile
+ src/mod/loggers/mod_syslog/Makefile
+ src/mod/loggers/mod_raven/Makefile
+ src/mod/say/mod_say_de/Makefile
+ src/mod/say/mod_say_en/Makefile
+ src/mod/say/mod_say_es/Makefile
+ src/mod/say/mod_say_es_ar/Makefile
+ src/mod/say/mod_say_fa/Makefile
+ src/mod/say/mod_say_fr/Makefile
+ src/mod/say/mod_say_he/Makefile
+ src/mod/say/mod_say_hr/Makefile
+ src/mod/say/mod_say_hu/Makefile
+ src/mod/say/mod_say_it/Makefile
+ src/mod/say/mod_say_ja/Makefile
+ src/mod/say/mod_say_nl/Makefile
+ src/mod/say/mod_say_pl/Makefile
+ src/mod/say/mod_say_pt/Makefile
+ src/mod/say/mod_say_ru/Makefile
+ src/mod/say/mod_say_sv/Makefile
+ src/mod/say/mod_say_th/Makefile
+ src/mod/say/mod_say_zh/Makefile
+ src/mod/timers/mod_posix_timer/Makefile
+ src/mod/timers/mod_timerfd/Makefile
+ src/mod/xml_int/mod_xml_cdr/Makefile
+ src/mod/xml_int/mod_xml_curl/Makefile
+ src/mod/xml_int/mod_xml_ldap/Makefile
+ src/mod/xml_int/mod_xml_radius/Makefile
+ src/mod/xml_int/mod_xml_rpc/Makefile
+ src/mod/xml_int/mod_xml_scgi/Makefile
+ src/mod/applications/mod_av/Makefile
+ src/mod/applications/mod_video_filter/Makefile
+ src/include/switch_am_config.h
+ build/getsounds.sh
+ build/getlib.sh
+ build/getg729.sh
+ build/freeswitch.pc
+ build/standalone_module/freeswitch.pc
+ build/modmake.rules
+ libs/esl/Makefile
+ libs/esl/perl/Makefile
+ libs/esl/php/Makefile
+ libs/xmlrpc-c/include/xmlrpc-c/config.h
+ libs/xmlrpc-c/xmlrpc_config.h
+ libs/xmlrpc-c/config.mk
+ libs/xmlrpc-c/srcdir.mk
+ libs/xmlrpc-c/stamp-h
+ scripts/gentls_cert])
+
+AM_CONDITIONAL(ISLINUX, [test `uname -s` = Linux])
+AM_CONDITIONAL(ISMAC, [test `uname -s` = Darwin])
+AM_CONDITIONAL(ISFREEBSD, [test `uname -s` = FreeBSD])
+AM_CONDITIONAL(IS64BITLINUX, [test `uname -m` = x86_64])
+
+AM_CONDITIONAL(HAVE_G723_1, [ test -d ${switch_srcdir}/libs/libg723_1 ])
+AM_CONDITIONAL(HAVE_G729, [ test -d ${switch_srcdir}/libs/libg729 ])
+
+#some vars to sub into the Makefile.am's
+#LIBS+=> core.log || error="yes";if test -n "$(VERBOSE)" -o "$$error" = "yes";then cat core.log;fi;if test "$$error" = "yes";then exit 1;fi
+LIBTOOL='$(SHELL) $(switch_builddir)/libtool'
+TOUCH_TARGET='if test -f "$@";then touch "$@";fi;'
+CONF_MODULES='$$(grep -v "\#" $(switch_builddir)/modules.conf | sed "s/|.*//" | sed -e "s|^.*/||" | sort | uniq )'
+CONF_DISABLED_MODULES='$$(grep "\#" $(switch_builddir)/modules.conf | grep -v "\#\#" | sed "s/|.*//" | sed -e "s|^.*/||" | sort | uniq )'
+OUR_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-all ; done )"; echo $$mods )'
+OUR_CLEAN_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-clean ; done )"; echo $$mods )'
+OUR_INSTALL_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-install ; done)"; echo $$mods )'
+OUR_UNINSTALL_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-uninstall ; done)"; echo $$mods )'
+OUR_TEST_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-print_tests ; done )"; echo $$mods )'
+OUR_CHECK_MODS='$$(if test -z "$(MODULES)" ; then tmp_mods="$(CONF_MODULES)"; else tmp_mods="$(MODULES)" ; fi ; mods="$$(for i in $$tmp_mods ; do echo $$i-check ; done )"; echo $$mods )'
+OUR_DISABLED_MODS='$$(tmp_mods="$(CONF_DISABLED_MODULES)"; mods="$$(for i in $$tmp_mods ; do echo $$i-all ; done )"; echo $$mods )'
+OUR_DISABLED_CLEAN_MODS='$$(tmp_mods="$(CONF_DISABLED_MODULES)"; mods="$$(for i in $$tmp_mods ; do echo $$i-clean ; done )"; echo $$mods )'
+OUR_DISABLED_INSTALL_MODS='$$(tmp_mods="$(CONF_DISABLED_MODULES)"; mods="$$(for i in $$tmp_mods ; do echo $$i-install ; done)"; echo $$mods )'
+OUR_DISABLED_UNINSTALL_MODS='$$(tmp_mods="$(CONF_DISABLED_MODULES)"; mods="$$(for i in $$tmp_mods ; do echo $$i-uninstall ; done)"; echo $$mods )'
+
+#AM_MAKEFLAGS='"OUR_MODULES=$(OUR_MODS)" "OUR_CLEAN_MODULES=$(OUR_CLEAN_MODS)" "OUR_INSTALL_MODULES=$(OUR_INSTALL_MODS)" "OUR_UNINSTALL_MODULES=$(OUR_UNINSTALL_MODS)" "OUR_DISABLED_MODULES=$(OUR_DISABLED_MODS)" "OUR_DISABLED_CLEAN_MODULES=$(OUR_DISABLED_CLEAN_MODS)" "OUR_DISABLED_INSTALL_MODULES=$(OUR_DISABLED_INSTALL_MODS)" "OUR_DISABLED_UNINSTALL_MODULES=$(OUR_DISABLED_UNINSTALL_MODS)" `test -n "$(VERBOSE)" || echo -s`'
+#AM_MAKEFLAGS='`test -n "$(VERBOSE)" || echo -s`'
+AC_SUBST(LIBTOOL)
+AC_SUBST(TOUCH_TARGET)
+AC_SUBST(CONF_DISABLED_MODULES)
+AC_SUBST(CONF_MODULES)
+
+AC_SUBST(OUR_MODS)
+AC_SUBST(OUR_CLEAN_MODS)
+AC_SUBST(OUR_TEST_MODS)
+AC_SUBST(OUR_CHECK_MODS)
+AC_SUBST(OUR_INSTALL_MODS)
+AC_SUBST(OUR_UNINSTALL_MODS)
+AC_SUBST(OUR_DISABLED_MODS)
+AC_SUBST(OUR_DISABLED_CLEAN_MODS)
+AC_SUBST(OUR_DISABLED_INSTALL_MODS)
+AC_SUBST(OUR_DISABLED_UNINSTALL_MODS)
+AC_SUBST(AM_MAKEFLAGS)
+
+ac_configure_args="$ac_configure_args --with-modinstdir=${modulesdir} CONFIGURE_CFLAGS='$CFLAGS $CPPFLAGS' CONFIGURE_CXXFLAGS='$CXXFLAGS $CPPFLAGS' CONFIGURE_LDFLAGS='$LDFLAGS' "
+
+# --prefix='$prefix' --exec_prefix='$exec_prefix' --libdir='$libdir' --libexecdir='$libexecdir' --bindir='$bindir' --sbindir='$sbindir' \
+# --localstatedir='$localstatedir' --datadir='$datadir'"
+
+# Run configure in all the subdirs
+AC_CONFIG_SUBDIRS([libs/srtp])
+if test "$use_system_apr" != "yes"; then
+ AC_CONFIG_SUBDIRS([libs/apr])
+fi
+if test "$use_system_aprutil" != "yes"; then
+ AC_CONFIG_SUBDIRS([libs/apr-util])
+fi
+AC_CONFIG_SUBDIRS([libs/iksemel])
+AC_CONFIG_SUBDIRS([libs/libdingaling])
+AC_CONFIG_SUBDIRS([libs/freetdm])
+AC_CONFIG_SUBDIRS([libs/unimrcp])
+if test "x${enable_zrtp}" = "xyes"; then
+ AC_CONFIG_SUBDIRS([libs/libzrtp])
+fi
+
+case $host in
+ *-openbsd*|*-netbsd*)
+ # libtool won't link static libs against shared ones on NetBSD/OpenBSD unless we tell it not to be stupid
+ AC_CONFIG_COMMANDS([hacklibtool], [cp libtool libtool.orig && sed -e "s/deplibs_check_method=.*/deplibs_check_method=pass_all/g" libtool.orig > libtool])
+ ;;
+esac
+
+
+AC_OUTPUT
+
+##
+## Configuration summary
+##
+
+echo
+echo "-------------------------- FreeSWITCH configuration --------------------------"
+echo ""
+echo " Locations:"
+echo ""
+echo " prefix: ${prefix}"
+echo " exec_prefix: ${exec_prefix}"
+echo " bindir: ${bindir}"
+echo " confdir: ${confdir}"
+echo " libdir: ${libdir}"
+echo " datadir: ${datadir}"
+echo " localstatedir: ${localstatedir}"
+echo " includedir: ${includedir}"
+echo ""
+echo " certsdir: ${certsdir}"
+echo " dbdir: ${dbdir}"
+echo " grammardir: ${grammardir}"
+echo " htdocsdir: ${htdocsdir}"
+echo " fontsdir: ${fontsdir}"
+echo " logfiledir: ${logfiledir}"
+echo " modulesdir: ${modulesdir}"
+echo " pkgconfigdir: ${pkgconfigdir}"
+echo " recordingsdir: ${recordingsdir}"
+echo " imagesdir: ${imagesdir}"
+echo " runtimedir: ${runtimedir}"
+echo " scriptdir: ${scriptdir}"
+echo " soundsdir: ${soundsdir}"
+echo " storagedir: ${storagedir}"
+echo " cachedir: ${cachedir}"
+echo ""
+echo "------------------------------------------------------------------------------"
diff --git a/packer/jambonz-mini/proxmox/files/configure.ac.grpc.patch b/packer/jambonz-mini/proxmox/files/configure.ac.grpc.patch
new file mode 100644
index 0000000..f030a1b
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/configure.ac.grpc.patch
@@ -0,0 +1,33 @@
+--- configure.ac 2019-10-22 22:47:40.566582350 +0000
++++ configure.ac.new 2019-10-23 14:56:29.469206772 +0000
+@@ -1563,6 +1563,20 @@
+ AM_CONDITIONAL([HAVE_LWS],[false])
+ fi
+
+++dnl DH: Added for including google protobuf libs
+++AC_ARG_WITH(grpc,
++ [AS_HELP_STRING([--with-grpc],
++ [enable support for google rpc (libgrpc++ and libgrpc)])],
++ [with_grpc="$withval"],
++ [with_grpc="no"])
++if test "$with_grpc" = "yes"; then
++ PKG_CHECK_MODULES([GRPC], [grpc++ grpc], [
++ AM_CONDITIONAL([HAVE_GRPC],[true])], [
++ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_GRPC],[false])])
++else
++ AM_CONDITIONAL([HAVE_GRPC],[false])
++fi
++
+ dnl ---------------------------------------------------------------------------
+ dnl - OpenLDAP SDK
+ dnl ---------------------------------------------------------------------------
+@@ -1858,6 +1872,9 @@
+ src/Makefile
+ src/mod/Makefile
+ src/mod/applications/mod_audio_fork/Makefile
++ src/mod/applications/mod_google_tts/Makefile
++ src/mod/applications/mod_google_transcribe/Makefile
++ src/mod/applications/mod_dialogflow/Makefile
+ src/mod/applications/mod_abstraction/Makefile
+ src/mod/applications/mod_avmd/Makefile
+ src/mod/applications/mod_bert/Makefile
diff --git a/packer/jambonz-mini/proxmox/files/configure.ac.patch b/packer/jambonz-mini/proxmox/files/configure.ac.patch
new file mode 100644
index 0000000..f1baabc
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/configure.ac.patch
@@ -0,0 +1,40 @@
+--- configure.ac 2019-09-30 19:01:33.308021065 +0000
++++ configure.ac.new 2019-09-30 23:00:53.730843843 +0000
+@@ -13,7 +13,7 @@
+ AC_CONFIG_FILES([src/include/switch_version.h.in:src/include/switch_version.h.template])
+
+ AC_CONFIG_AUX_DIR(build/config)
+-AM_INIT_AUTOMAKE
++AM_INIT_AUTOMAKE([subdir-objects])
+ m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
+ AC_CONFIG_SRCDIR([src/switch.c])
+ AC_CONFIG_HEADER([src/include/switch_private.h])
+@@ -1549,6 +1549,20 @@
+ AC_MSG_ERROR([You need to either install libedit-dev (>= 2.11) or configure with --disable-core-libedit-support])
+ ])])])
+
++dnl DH: Added for including libwebsockets
++AC_ARG_WITH(lws,
++ [AS_HELP_STRING([--with-lws],
++ [enable support for libwebsockets])],
++ [with_lws="$withval"],
++ [with_lws="no"])
++if test "$with_lws" = "yes"; then
++ PKG_CHECK_MODULES([LWS], [libwebsockets], [
++ AM_CONDITIONAL([HAVE_LWS],[true])], [
++ AC_MSG_RESULT([no]); AM_CONDITIONAL([HAVE_LWS],[false])])
++else
++ AM_CONDITIONAL([HAVE_LWS],[false])
++fi
++
+ dnl ---------------------------------------------------------------------------
+ dnl - OpenLDAP SDK
+ dnl ---------------------------------------------------------------------------
+@@ -1843,6 +1857,7 @@
+ tests/unit/Makefile
+ src/Makefile
+ src/mod/Makefile
++ src/mod/applications/mod_audio_fork/Makefile
+ src/mod/applications/mod_abstraction/Makefile
+ src/mod/applications/mod_avmd/Makefile
+ src/mod/applications/mod_bert/Makefile
diff --git a/packer/jambonz-mini/proxmox/files/drachtio-5070.conf.xml b/packer/jambonz-mini/proxmox/files/drachtio-5070.conf.xml
new file mode 100644
index 0000000..f7ac6ae
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/drachtio-5070.conf.xml
@@ -0,0 +1,31 @@
+
+
+
+ 0.0.0.0
+
+
+
+
+
+ 8192
+
+
+
+ false
+
+
+
+
+ /var/log/drachtio/drachtio-5070.log
+ /var/log/drachtio/archive
+ 100
+ 10000
+ true
+
+
+ 3
+
+ info
+
+
+
diff --git a/packer/jambonz-mini/proxmox/files/drachtio-5070.service b/packer/jambonz-mini/proxmox/files/drachtio-5070.service
new file mode 100644
index 0000000..32a6ada
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/drachtio-5070.service
@@ -0,0 +1,28 @@
+
+[Unit]
+Description=drachtio
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+ExecStart=/usr/local/bin/drachtio --daemon -f /etc/drachtio-5070.conf.xml --contact sip:*:5070;transport=udp,tcp --address 0.0.0.0 --port 9023
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/proxmox/files/drachtio-fail2ban.conf b/packer/jambonz-mini/proxmox/files/drachtio-fail2ban.conf
new file mode 100644
index 0000000..bce54d4
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/drachtio-fail2ban.conf
@@ -0,0 +1,18 @@
+# Fail2Ban filter for drachtio spammer detection
+#
+
+[INCLUDES]
+
+# Read common prefixes. If any customizations available -- read them from
+# common.local
+before = common.conf
+
+[Definition]
+
+_daemon = drachtio
+
+__pid_re = (?:\[\d+\])
+
+failregex = detected potential spammer from <HOST>:\d+
+
+ignoreregex =
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/drachtio.conf.xml b/packer/jambonz-mini/proxmox/files/drachtio.conf.xml
new file mode 100644
index 0000000..52a0c1f
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/drachtio.conf.xml
@@ -0,0 +1,35 @@
+
+
+
+ 127.0.0.1
+
+
+ http://127.0.0.1:4000
+
+
+
+
+
+
+ 8192
+
+
+
+ false
+
+
+
+
+ /var/log/drachtio/drachtio.log
+ /var/log/drachtio/archive
+ 100
+ 10000
+ false
+
+
+ 3
+
+ info
+
+
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/drachtio.service b/packer/jambonz-mini/proxmox/files/drachtio.service
new file mode 100644
index 0000000..d5b1fdc
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/drachtio.service
@@ -0,0 +1,32 @@
+[Unit]
+Description=drachtio
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+ExecStartPre=/bin/sh -c 'systemctl set-environment LOCAL_IP=$$(/bin/ip -4 addr show eth0 | grep -oP "(?<=inet )\d+(\.\d+){3}" | head -n 1)'
+ExecStartPre=/bin/sh -c "systemctl set-environment PUBLIC_IP=`/usr/bin/curl -s http://ipecho.net/plain`"
+ExecStart=/usr/local/bin/drachtio --daemon \
+--contact sip:${PUBLIC_IP};transport=udp,tcp \
+--contact sip:${LOCAL_IP};transport=udp,tcp \
+--address 0.0.0.0 --port 9022 --homer 127.0.0.1:9060 --homer-id 10
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/proxmox/files/ecosystem.config.js b/packer/jambonz-mini/proxmox/files/ecosystem.config.js
new file mode 100644
index 0000000..275f5bd
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/ecosystem.config.js
@@ -0,0 +1,297 @@
+module.exports = {
+ apps : [
+ {
+ name: 'jambonz-webapp',
+ script: 'npm',
+ cwd: '/home/admin/apps/jambonz-webapp',
+ args: 'run serve'
+ },
+ {
+ name: 'jambonz-smpp-esme',
+ cwd: '/home/admin/apps/jambonz-smpp-esme',
+ script: 'app.js',
+ out_file: '/home/admin/.pm2/logs/jambonz-smpp-esme.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-smpp-esme.log',
+ combine_logs: true,
+ instance_var: 'INSTANCE_ID',
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ max_memory_restart: '2G',
+ env: {
+ NODE_ENV: 'production',
+ HTTP_PORT: 3020,
+ AVOID_UDH: true,
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_LOGLEVEL: 'info'
+ }
+ },
+ {
+ name: 'jambonz-api-server',
+ cwd: '/home/admin/apps/jambonz-api-server',
+ script: 'app.js',
+ out_file: '/home/admin/.pm2/logs/jambonz-api-server.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-api-server.log',
+ combine_logs: true,
+ instance_var: 'INSTANCE_ID',
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ max_memory_restart: '1G',
+ env: {
+ NODE_ENV: 'production',
+ AUTHENTICATION_KEY: 'JWT-SECRET-GOES_HERE',
+ JWT_SECRET: 'JWT-SECRET-GOES_HERE',
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_LOGLEVEL: 'info',
+ JAMBONE_API_VERSION: 'v1',
+ JAMBONES_TIME_SERIES_HOST: '127.0.0.1',
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1,
+ HTTP_PORT: 3002,
+ JAEGER_BASE_URL: 'http://127.0.0.1:16686',
+ HOMER_BASE_URL: 'http://127.0.0.1:9080',
+ HOMER_USERNAME: 'admin',
+ HOMER_PASSWORD: 'sipcapture'
+ }
+ },
+ {
+ name: 'sbc-call-router',
+ cwd: '/home/admin/apps/sbc-call-router',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ combine_logs: true,
+ out_file: '/home/admin/.pm2/logs/jambonz-sbc-call-router.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-sbc-call-router.log',
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ max_memory_restart: '1G',
+ env: {
+ NODE_ENV: 'production',
+ HTTP_PORT: 4000,
+ JAMBONES_INBOUND_ROUTE: '127.0.0.1:4002',
+ JAMBONES_OUTBOUND_ROUTE: '127.0.0.1:4003',
+ JAMBONZ_TAGGED_INBOUND: 1,
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1,
+ JAMBONES_NETWORK_CIDR: 'PRIVATE_IP/32'
+ }
+ },
+ {
+ name: 'sbc-sip-sidecar',
+ cwd: '/home/admin/apps/sbc-sip-sidecar',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ out_file: '/home/admin/.pm2/logs/jambonz-sbc-sip-sidecar.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-sbc-sip-sidecar.log',
+ combine_logs: true,
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ max_memory_restart: '1G',
+ env: {
+ NODE_ENV: 'production',
+ JAMBONES_LOGLEVEL: 'info',
+ RTPENGINE_PING_INTERVAL: 30000,
+ DRACHTIO_HOST: '127.0.0.1',
+ DRACHTIO_PORT: 9022,
+ DRACHTIO_SECRET: 'cymru',
+ JAMBONES_NETWORK_CIDR: 'PRIVATE_IP/32',
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_TIME_SERIES_HOST: '127.0.0.1',
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1
+ }
+ },
+ {
+ name: 'sbc-outbound',
+ cwd: '/home/admin/apps/sbc-outbound',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ out_file: '/home/admin/.pm2/logs/jambonz-sbc-outbound.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-sbc-outbound.log',
+ combine_logs: true,
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ env: {
+ NODE_ENV: 'production',
+ JAMBONES_LOGLEVEL: 'info',
+ JAMBONES_NETWORK_CIDR: 'PRIVATE_IP/32',
+ MIN_CALL_LIMIT: 9999,
+ RTPENGINE_PING_INTERVAL: 30000,
+ DRACHTIO_HOST: '127.0.0.1',
+ DRACHTIO_PORT: 9022,
+ DRACHTIO_SECRET: 'cymru',
+ JAMBONES_RTPENGINE_UDP_PORT: 6000,
+ JAMBONES_RTPENGINES: '127.0.0.1:22222',
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_TIME_SERIES_HOST: '127.0.0.1',
+ JAMBONES_TRACK_ACCOUNT_CALLS: 0,
+ JAMBONES_TRACK_SP_CALLS: 0,
+ JAMBONES_TRACK_APP_CALLS: 0,
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1,
+ MS_TEAMS_FQDN: ''
+ }
+ },
+ {
+ name: 'sbc-inbound',
+ cwd: '/home/admin/apps/sbc-inbound',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ out_file: '/home/admin/.pm2/logs/jambonz-sbc-inbound.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-sbc-inbound.log',
+ combine_logs: true,
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ env: {
+ NODE_ENV: 'production',
+ JAMBONES_NETWORK_CIDR: 'PRIVATE_IP/32',
+ JAMBONES_LOGLEVEL: 'info',
+ DRACHTIO_HOST: '127.0.0.1',
+ DRACHTIO_PORT: 9022,
+ DRACHTIO_SECRET: 'cymru',
+ JAMBONES_RTPENGINE_UDP_PORT: 7000,
+ JAMBONES_RTPENGINES: '127.0.0.1:22222',
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_TIME_SERIES_HOST: '127.0.0.1',
+ JAMBONES_TRACK_ACCOUNT_CALLS: 0,
+ JAMBONES_TRACK_SP_CALLS: 0,
+ JAMBONES_TRACK_APP_CALLS: 0,
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1,
+ MS_TEAMS_SIP_PROXY_IPS: '52.114.148.0, 52.114.132.46, 52.114.75.24, 52.114.76.76, 52.114.7.24, 52.114.14.70'
+ }
+ },
+ {
+ name: 'sbc-rtpengine-sidecar',
+ cwd: '/home/admin/apps/sbc-rtpengine-sidecar',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ out_file: '/home/admin/.pm2/logs/jambonz-sbc-rtpengine-sidecar.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-sbc-rtpengine-sidecar.log',
+ combine_logs: true,
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ max_memory_restart: '1G',
+ env: {
+ NODE_ENV: 'production',
+ LOGLEVEL: 'info',
+ DTMF_ONLY: true,
+ RTPENGINE_DTMF_LOG_PORT: 22223,
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1
+ }
+ },
+ {
+ name: 'jambonz-feature-server',
+ cwd: '/home/admin/apps/jambonz-feature-server',
+ script: 'app.js',
+ instance_var: 'INSTANCE_ID',
+ out_file: '/home/admin/.pm2/logs/jambonz-feature-server.log',
+ err_file: '/home/admin/.pm2/logs/jambonz-feature-server.log',
+ combine_logs: true,
+ exec_mode: 'fork',
+ instances: 1,
+ autorestart: true,
+ watch: false,
+ env: {
+ NODE_ENV: 'production',
+ AUTHENTICATION_KEY: 'JWT-SECRET-GOES_HERE',
+ JWT_SECRET: 'JWT-SECRET-GOES_HERE',
+ JAMBONES_GATHER_EARLY_HINTS_MATCH: 1,
+ JAMBONES_OTEL_ENABLED: 1,
+ OTEL_EXPORTER_JAEGER_ENDPOINT: 'http://localhost:14268/api/traces',
+ OTEL_EXPORTER_OTLP_METRICS_INSECURE: 1,
+ OTEL_EXPORTER_JAEGER_GRPC_INSECURE: 1,
+ OTEL_TRACES_SAMPLER: 'parentbased_traceidratio',
+ OTEL_TRACES_SAMPLER_ARG: 1.0,
+ VMD_HINTS_FILE: '/home/admin/apps/jambonz-feature-server/data/example-voicemail-greetings.json',
+ ENABLE_METRICS: 1,
+ STATS_HOST: '127.0.0.1',
+ STATS_PORT: 8125,
+ STATS_PROTOCOL: 'tcp',
+ STATS_TELEGRAF: 1,
+ AWS_REGION: 'AWS_REGION_NAME',
+ JAMBONES_NETWORK_CIDR: 'PRIVATE_IP/32',
+ JAMBONES_API_BASE_URL: '--JAMBONES_API_BASE_URL--',
+ // JAMBONES_GATHER_EARLY_HINTS_MATCH is already set above; duplicate key removed
+ JAMBONES_MYSQL_HOST: '127.0.0.1',
+ JAMBONES_MYSQL_USER: 'admin',
+ JAMBONES_MYSQL_PASSWORD: 'JambonzR0ck$',
+ JAMBONES_MYSQL_DATABASE: 'jambones',
+ JAMBONES_MYSQL_CONNECTION_LIMIT: 10,
+ JAMBONES_REDIS_HOST: '127.0.0.1',
+ JAMBONES_REDIS_PORT: 6379,
+ JAMBONES_LOGLEVEL: 'info',
+ JAMBONES_TIME_SERIES_HOST: '127.0.0.1',
+ HTTP_PORT: 3000,
+ DRACHTIO_HOST: '127.0.0.1',
+ DRACHTIO_PORT: 9023,
+ DRACHTIO_SECRET: 'cymru',
+ JAMBONES_SBCS: 'PRIVATE_IP',
+ JAMBONES_FREESWITCH: '127.0.0.1:8021:JambonzR0ck$',
+ SMPP_URL: 'http://PRIVATE_IP:3020'
+ }
+ }
+]
+};
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/event_socket.conf.xml b/packer/jambonz-mini/proxmox/files/event_socket.conf.xml
new file mode 100644
index 0000000..075dd8e
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/event_socket.conf.xml
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/freeswitch.service b/packer/jambonz-mini/proxmox/files/freeswitch.service
new file mode 100644
index 0000000..a40b612
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/freeswitch.service
@@ -0,0 +1,35 @@
+
+[Unit]
+Description=freeswitch
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+PIDFile=/usr/local/freeswitch/run/freeswitch.pid
+EnvironmentFile=-/etc/default/freeswitch
+Environment="MOD_AUDIO_FORK_SUBPROTOCOL_NAME=audio.jambonz.org"
+Environment="MOD_AUDIO_FORK_SERVICE_THREADS=1"
+Environment="MOD_AUDIO_FORK_BUFFER_SECS=3"
+Environment="LD_LIBRARY_PATH=/usr/local/lib"
+Environment="GOOGLE_APPLICATION_CREDENTIALS=/home/admin/credentials/gcp.json"
+ExecStart=/usr/local/freeswitch/bin/freeswitch -nc -nonat
+TimeoutSec=45s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/freeswitch_log_rotation b/packer/jambonz-mini/proxmox/files/freeswitch_log_rotation
new file mode 100644
index 0000000..6028983
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/freeswitch_log_rotation
@@ -0,0 +1,26 @@
+#!/bin/bash
+# logrotate replacement script
+# source : http://wiki.fusionpbx.com/index.php?title=RotateFSLogs
+# put in /etc/cron.daily
+# don't forget to make it executable
+# you might consider changing /usr/local/freeswitch/conf/autoload_configs/logfile.conf.xml
+#
+
+#number of days of logs to keep
+NUMBERDAYS=5
+FSPATH=/usr/local/freeswitch/
+
+$FSPATH/bin/fs_cli -x "fsctl send_sighup" |grep '+OK' >/tmp/rotateFSlogs
+if [ $? -eq 0 ]; then
+ #-cmin 2 could bite us (leave some files uncompressed, eg 11M auto-rotate). Maybe -1440 is better?
+ find $FSPATH/log/ -name "freeswitch.log.*" -cmin -2 -exec gzip {} \;
+ find $FSPATH/log/ -name "freeswitch.log.*.gz" -mtime +$NUMBERDAYS -exec /bin/rm {} \;
+ chown www-data:www-data $FSPATH/log/freeswitch.log
+ chmod 660 $FSPATH/log/freeswitch.log
+ logger FreeSWITCH Logs rotated
+ /bin/rm /tmp/rotateFSlogs
+else
+ logger FreeSWITCH Log Rotation Script FAILED
+ mail -s "$HOST FS Log Rotate Error" root < /tmp/rotateFSlogs
+ /bin/rm /tmp/rotateFSlogs
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/grafana-dashboard-default.yaml b/packer/jambonz-mini/proxmox/files/grafana-dashboard-default.yaml
new file mode 100644
index 0000000..34d3347
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/grafana-dashboard-default.yaml
@@ -0,0 +1,8 @@
+apiVersion: 1
+
+providers:
+ - name: Default
+ type: file
+ folder: 'jambonz'
+ options:
+ path: /var/lib/grafana/dashboards
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/grafana-dashboard-heplify.json b/packer/jambonz-mini/proxmox/files/grafana-dashboard-heplify.json
new file mode 100644
index 0000000..f3a003a
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/grafana-dashboard-heplify.json
@@ -0,0 +1,1097 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": 2,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_kpi_rrd",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "*"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "RRD",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 8,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "0"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_rtcp_packets_lost",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "gauge"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "RTCP Packet Loss",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 5
+ },
+ "hiddenSeries": false,
+ "id": 5,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_kpi_srd",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "*"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "SRD",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 7
+ },
+ "hiddenSeries": false,
+ "id": 9,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_rtcp_jitter",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "gauge"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "RTCP Jitter",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "type"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_packets_total",
+ "orderByTime": "ASC",
+ "policy": "60s",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "counter"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "HEPlify Packets Total",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 14
+ },
+ "hiddenSeries": false,
+ "id": 10,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_rtcp_dlsr",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "gauge"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "RTCP DLSR",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 6,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "type"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_packets_size",
+ "orderByTime": "ASC",
+ "policy": "60s",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "gauge"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "HEPlify Packets Total",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "datasource": "InfluxDB",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 180
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 22
+ },
+ "id": 12,
+ "options": {
+ "displayMode": "gradient",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "mean"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showUnfilled": true
+ },
+ "pluginVersion": "7.3.1",
+ "targets": [
+ {
+ "alias": "$tag_method -> $tag_response",
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "method"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "response"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "heplify_method_response",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "counter"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Methods and Responses",
+ "type": "bargauge"
+ }
+ ],
+ "refresh": "5s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "HEPlify Metrics",
+ "uid": "HO0OhLtGk",
+ "version": 1
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/grafana-dashboard-jambonz.json b/packer/jambonz-mini/proxmox/files/grafana-dashboard-jambonz.json
new file mode 100644
index 0000000..5f76694
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/grafana-dashboard-jambonz.json
@@ -0,0 +1,2683 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 0,
+ "y": 0
+ },
+ "id": 9,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.5.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_sip_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT sum(\"last_value\") FROM (SELECT last(\"value\") AS \"last_value\" FROM \"sbc_sip_calls_count\" WHERE $timeFilter GROUP BY time($__interval),\"host\", \"instance_id\" fill(null)) GROUP BY time($__interval) fill(null)",
+ "queryType": "randomWalk",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Current Calls",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 4,
+ "y": 0
+ },
+ "id": 10,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.5.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_media_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT sum(\"last_value\") FROM (SELECT last(\"value\") AS \"last_value\" FROM \"sbc_media_calls_count\" WHERE $timeFilter GROUP BY time($__interval), \"host\" fill(null)) GROUP BY time($__interval) fill(null)",
+ "queryType": "randomWalk",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "last"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Current Media Streams",
+ "type": "stat"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 18,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "9.5.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_sip_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "max"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Total Calls",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:309",
+ "decimals": 0,
+ "format": "short",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:310",
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "alias": "Active media sessions (SBCs)",
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 13,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "9.5.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "linear"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_media_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Total Media Streams",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:256",
+ "decimals": 0,
+ "format": "short",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:257",
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 8,
+ "x": 0,
+ "y": 3
+ },
+ "id": 39,
+ "options": {
+ "displayLabels": [],
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "sipStatus"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_terminations",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Inbound Response Codes",
+ "type": "piechart"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 5
+ },
+ "hiddenSeries": false,
+ "id": 14,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "9.5.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_sip_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "max"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "direction",
+ "operator": "=",
+ "value": "inbound"
+ }
+ ]
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Total Inbound Calls",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:362",
+ "decimals": 0,
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:363",
+ "format": "short",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 5
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "9.5.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "fs_sip_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "max"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Feature Server Calls",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:468",
+ "decimals": 0,
+ "format": "short",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:469",
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 10
+ },
+ "id": 20,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "average",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "app_hook_response_time",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "mean"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "90th percentile",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "app_hook_response_time",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "90_percentile"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Webhook Response Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 10
+ },
+ "id": 43,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "app_rtpengine_response_time",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "mean"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Rtpengine command response time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 10
+ },
+ "id": 41,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "app_mysql_response_time",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "mean"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "MySQL Response Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 15
+ },
+ "id": 12,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_vendor",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "vendor"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tts_response_time",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "mean"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "TTS Response Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 15
+ },
+ "id": 7,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "total requests",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "vendor"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "0"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tts_cache_requests",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "count"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "served from cache",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "0"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "tts_cache_requests",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"value\") FROM \"tts_cache_requests\" WHERE (\"found\" = 'yes') AND $timeFilter GROUP BY time($__interval) fill(null)",
+ "rawQuery": false,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "count"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "found",
+ "operator": "=",
+ "value": "yes"
+ }
+ ]
+ }
+ ],
+ "title": "TTS Cache Hits",
+ "type": "timeseries"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "description": "",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 15
+ },
+ "hiddenSeries": false,
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "9.5.1",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "fs_media_channels_in_use",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Freeswitch Channels",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:1035",
+ "decimals": 0,
+ "format": "none",
+ "logBase": 1,
+ "min": "0",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:1036",
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false
+ }
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 0,
+ "y": 20
+ },
+ "id": 5,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_invites",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "count"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "SBC Invites",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "short"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 20
+ },
+ "id": 19,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "previous"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_sip_calls_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "max"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "direction",
+ "operator": "=",
+ "value": "outbound"
+ }
+ ]
+ }
+ ],
+ "title": "Total Outbound Calls",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 20
+ },
+ "id": 6,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_host",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "none"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "sbc_users_count",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "count"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "title": "Active registrations",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "max": 100,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 25
+ },
+ "id": 22,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "mem",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "available_percent"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "role",
+ "operator": "=",
+ "value": "mini"
+ }
+ ]
+ }
+ ],
+ "title": "% free memory",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 1,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decgbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 25
+ },
+ "id": 31,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "exe"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "procstat",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "memory_usage"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "role",
+ "operator": "=",
+ "value": "mini"
+ }
+ ]
+ }
+ ],
+ "title": "Memory By Process",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 1,
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 33
+ },
+ "id": 23,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "system",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "load1"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "role",
+ "operator": "=",
+ "value": "mini"
+ }
+ ]
+ }
+ ],
+ "title": "Load avg",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "decimals": 0,
+ "mappings": [],
+ "max": 100,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percent"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 33
+ },
+ "id": 37,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2",
+ "targets": [
+ {
+ "datasource": {
+ "type": "influxdb",
+ "uid": "PD39DF8CE8C1D829D"
+ },
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "host"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "exe"
+ ],
+ "type": "tag"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "measurement": "procstat",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "queryType": "randomWalk",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "cpu_usage"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": [
+ {
+ "key": "role",
+ "operator": "=",
+ "value": "mini"
+ }
+ ]
+ }
+ ],
+ "title": "CPU Usage By Process",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": "5s",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-12h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "Jambonz Metrics",
+ "uid": "oAM51epMz",
+ "version": 1,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/grafana-dashboard-servers.json b/packer/jambonz-mini/proxmox/files/grafana-dashboard-servers.json
new file mode 100644
index 0000000..e48d48f
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/grafana-dashboard-servers.json
@@ -0,0 +1,5121 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "InfluxDB dashboards for telegraf metrics",
+ "editable": true,
+ "gnetId": 5955,
+ "graphTooltip": 1,
+ "id": 4,
+ "iteration": 1604669735342,
+ "links": [],
+ "panels": [
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 65058,
+ "panels": [],
+ "title": "Quick overview",
+ "type": "row"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "s",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 4,
+ "x": 0,
+ "y": 1
+ },
+ "id": 65078,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"uptime_format\") AS \"value\" FROM \"system\" WHERE \"host\" =~ /$server$/ AND $timeFilter GROUP BY time($interval)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "",
+ "title": "Uptime",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 4,
+ "y": 1
+ },
+ "id": 65079,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"used_percent\") FROM \"disk\" WHERE (\"host\" =~ /^$server$/ AND \"path\" = '/') AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "70,80,90",
+ "title": "Root FS Used",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 6,
+ "y": 1
+ },
+ "id": 65080,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"load5\") FROM \"system\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "3,7,10",
+ "title": "LA (Medium)",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 8,
+ "y": 1
+ },
+ "id": 65081,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"n_cpus\") AS \"mean_n_cpus\" FROM \"system\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null) ",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "",
+ "title": "CPUs",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 10,
+ "y": 1
+ },
+ "id": 65082,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"usage_idle\") * -1 + 100 FROM \"cpu\" WHERE (\"host\" =~ /^$server$/ AND \"cpu\" = 'cpu-total') AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "70,80,90",
+ "title": "CPU usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 12,
+ "y": 1
+ },
+ "id": 65083,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"used_percent\") FROM \"mem\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "70,80,90",
+ "title": "RAM usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": 2,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 14,
+ "y": 1
+ },
+ "id": 65084,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "\nSELECT mean(\"usage_iowait\") FROM \"cpu\" WHERE (\"host\" =~ /^$server$/ AND \"cpu\" = 'cpu-total') AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "70,80,90",
+ "title": "IOWait",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": 0,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 16,
+ "y": 1
+ },
+ "id": 65085,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "\nSELECT last(\"total\") FROM \"processes\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "1,5,10",
+ "title": "Processes",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": 0,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 18,
+ "y": 1
+ },
+ "id": 65086,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "\nSELECT last(\"total_threads\") FROM \"processes\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "1,5,10",
+ "title": "Threads",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 20,
+ "y": 1
+ },
+ "id": 65087,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"used_percent\") FROM \"swap\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "50,70,90",
+ "title": "Swap Usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "#299c46",
+ "rgba(237, 129, 40, 0.89)",
+ "#d44a3a"
+ ],
+ "datasource": "InfluxDB-Telegraf",
+ "decimals": null,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 2,
+ "x": 22,
+ "y": 1
+ },
+ "id": 65088,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": true
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT last(\"n_users\") FROM \"system\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": "20,50",
+ "title": "Users",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 4
+ },
+ "id": 65060,
+ "panels": [],
+ "title": "SYSTEM - CPU, Memory, Disk",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 5
+ },
+ "hiddenSeries": false,
+ "id": 12054,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/mem_total/",
+ "color": "#BF1B00",
+ "fill": 0,
+ "linewidth": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "mem_inactive",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(total) as total, mean(used) as used, mean(cached) as cached, mean(free) as free, mean(buffered) as buffered FROM \"mem\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY asc",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Memory usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 15
+ },
+ "hiddenSeries": false,
+ "id": 65092,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "hide": false,
+ "measurement": "cpu_percentageBusy",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(usage_user) as \"user\", mean(usage_system) as \"system\", mean(usage_softirq) as \"softirq\", mean(usage_steal) as \"steal\", mean(usage_nice) as \"nice\", mean(usage_irq) as \"irq\", mean(usage_iowait) as \"iowait\", mean(usage_guest) as \"guest\", mean(usage_guest_nice) as \"guest_nice\" FROM \"cpu\" WHERE \"host\" =~ /$server$/ and cpu = 'cpu-total' AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "logBase": 1,
+ "max": 100,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 25
+ },
+ "hiddenSeries": false,
+ "id": 54694,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": null,
+ "sortDesc": null,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "system_load1",
+ "policy": "default",
+ "query": "SELECT mean(load1) as load1,mean(load5) as load5,mean(load15) as load15 FROM \"system\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), * ORDER BY asc",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU Load",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 25
+ },
+ "hiddenSeries": false,
+ "id": 65089,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/cpu/"
+ },
+ {
+ "alias": "/avg/"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_cpu",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "hide": false,
+ "measurement": "cpu_percentageBusy",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT 100 - mean(\"usage_idle\") FROM \"cpu\" WHERE (\"cpu\" =~ /cpu[0-9].*/ AND \"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval), \"cpu\" fill(null)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU usage per core",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 32
+ },
+ "hiddenSeries": false,
+ "id": 28239,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "hide": false,
+ "measurement": "cpu_percentageBusy",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(running) as running, mean(blocked) as blocked, mean(sleeping) as sleeping, mean(stopped) as stopped, mean(zombies) as zombies, mean(paging) as paging, mean(unknown) as unknown FROM \"processes\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY asc",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Processes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 32
+ },
+ "hiddenSeries": false,
+ "id": 65097,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "hide": false,
+ "measurement": "cpu_percentageBusy",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(context_switches),1s)as \"context switches\" FROM \"kernel\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY asc",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Context Switches",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ops",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 40
+ },
+ "id": 65096,
+ "panels": [],
+ "title": "Disk",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 41
+ },
+ "hiddenSeries": false,
+ "id": 52240,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "disk.total",
+ "color": "#BF1B00",
+ "fill": 0,
+ "linewidth": 2,
+ "zindex": 3
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_path : $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "disk_total",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(total) AS \"total\", mean(used) as \"used\", mean(free) as \"free\" FROM \"disk\" WHERE \"host\" =~ /$server$/ AND \"path\" = '/' AND $timeFilter GROUP BY time($interval), \"host\", \"path\"",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Root Disk usage (/)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 41
+ },
+ "hiddenSeries": false,
+ "id": 65090,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "disk.used_percent",
+ "color": "#BF1B00",
+ "fill": 0,
+ "linewidth": 2,
+ "zindex": 3
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_path ($tag_fstype on $tag_device)",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "disk_total",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"used_percent\") AS \"used_percent\" FROM \"disk\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval), \"path\", \"device\", \"fstype\" fill(null)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "All partitions usage (%)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "percent",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 49
+ },
+ "hiddenSeries": false,
+ "id": 33458,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/used/",
+ "color": "#BF1B00",
+ "zindex": 3
+ },
+ {
+ "alias": "/free/",
+ "bars": false,
+ "fill": 0,
+ "lines": true,
+ "linewidth": 1
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_path : $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "disk_inodes_free",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(inodes_total) as \"total\", mean(inodes_free) as \"free\", mean(inodes_used) as \"used\" FROM \"disk\" WHERE \"host\" =~ /$server$/ AND \"path\" = '/' AND $timeFilter GROUP BY time($interval), \"host\", \"path\"",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Root (/) Disk inodes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 10,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 49
+ },
+ "hiddenSeries": false,
+ "id": 65091,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "disk.used_percent",
+ "color": "#BF1B00",
+ "fill": 0,
+ "linewidth": 2,
+ "zindex": 3
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_path ($tag_fstype on $tag_device)",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "disk_total",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"inodes_free\") AS \"free\" FROM \"disk\" WHERE (\"host\" =~ /^$server$/) AND $timeFilter GROUP BY time($interval), \"path\", \"device\", \"fstype\" fill(null)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "All partitions Inodes (Free)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 57
+ },
+ "id": 61850,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/total/",
+ "fill": 0
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "swap_in",
+ "policy": "default",
+ "query": "SELECT mean(free) as \"free\", mean(used) as \"used\", mean(total) as \"total\" FROM \"swap\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY asc",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Swap usage (bytes)",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 57
+ },
+ "id": 26024,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/in/",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "swap_in",
+ "policy": "default",
+ "query": "SELECT mean(\"in\") as \"in\", mean(\"out\") as \"out\" FROM \"swap\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY asc",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Swap I/O bytes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 65
+ },
+ "id": 13782,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(reads),1s) as \"read\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "\nSELECT non_negative_derivative(mean(writes),1s) as \"write\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk I/O requests",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "iops",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 65
+ },
+ "id": 56720,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/read/",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(read_time),1s) as \"read\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(write_time),1s) as \"write\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk I/O time",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 73
+ },
+ "id": 60200,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/read/",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(read_bytes),1s) as \"read\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_name: $col",
+ "dsType": "influxdb",
+ "function": "mean",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "path",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "io_reads",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(write_bytes),1s) as \"write\" FROM \"diskio\" WHERE \"host\" =~ /$server$/ AND \"name\" =~ /$disk$/ AND $timeFilter GROUP BY time($interval), *",
+ "rawQuery": true,
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Disk I/O bytes",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 81
+ },
+ "id": 65059,
+ "panels": [],
+ "title": "Network",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 82
+ },
+ "id": 42026,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/ in$/",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(bytes_recv),1s)*8 as \"in\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), * fill(none)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(bytes_sent),1s)*8 as \"out\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), * fill(none)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network Usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bps",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 90
+ },
+ "id": 28572,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "/ in$/",
+ "transform": "negative-Y"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(packets_recv), 1s) as \"in\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), * fill(none)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(packets_sent), 1s) as \"out\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), * fill(none)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network Packets",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "pps",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 98
+ },
+ "id": 65093,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(tcp_close) as CLOSED, mean(tcp_close_wait) as CLOSE_WAIT, mean(tcp_closing) as CLOSING, mean(tcp_established) as ESTABLISHED, mean(tcp_fin_wait1) as FIN_WAIT1, mean(tcp_fin_wait2) as FIN_WAIT2, mean(tcp_last_ack) as LAST_ACK, mean(tcp_syn_recv) as SYN_RECV, mean(tcp_syn_sent) as SYN_SENT, mean(tcp_time_wait) as TIME_WAIT FROM \"netstat\" WHERE host =~ /$server$/ AND $timeFilter GROUP BY time($interval), host ORDER BY asc",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "TCP connections",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 105
+ },
+ "id": 65094,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": true,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(tcp_outrsts), 1s) FROM \"net\" WHERE \"host\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(tcp_activeopens), 1s) FROM \"net\" WHERE \"host\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(tcp_estabresets), 1s) FROM \"net\" WHERE \"host\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "C",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "groupBy": [
+ {
+ "params": [
+ "$__interval"
+ ],
+ "type": "time"
+ },
+ {
+ "params": [
+ "null"
+ ],
+ "type": "fill"
+ }
+ ],
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(tcp_passiveopens), 1s) FROM \"net\" WHERE \"host\" =~ /^$server$/ AND $timeFilter GROUP BY time($interval) fill(null)",
+ "rawQuery": true,
+ "refId": "D",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "TCP handshake issues",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 112
+ },
+ "id": 58901,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(drop_in), 1s) as \"in\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), host,interface fill(none)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(drop_out), 1s) as \"out\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), host,interface fill(none)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Packets Drop",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Packets drop",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "InfluxDB-Telegraf",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "grid": {},
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 112
+ },
+ "id": 50643,
+ "interval": "$inter",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": false,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pluginVersion": "7.3.1",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(err_in), 1s) as \"in\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), host,interface fill(none)",
+ "rawQuery": true,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ },
+ {
+ "alias": "$tag_host: $tag_interface: $col",
+ "dsType": "influxdb",
+ "function": "derivative",
+ "groupBy": [
+ {
+ "interval": "auto",
+ "params": [
+ "auto"
+ ],
+ "type": "time"
+ },
+ {
+ "key": "host",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ },
+ {
+ "key": "interface",
+ "params": [
+ "tag"
+ ],
+ "type": "tag"
+ }
+ ],
+ "measurement": "net_bytes_recv",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT non_negative_derivative(mean(err_out), 1s) as \"out\" FROM \"net\" WHERE host =~ /$server/ AND interface =~ /$interface/ AND $timeFilter GROUP BY time($interval), host,interface fill(none)",
+ "rawQuery": true,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "value"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [],
+ "type": "mean"
+ }
+ ]
+ ],
+ "tags": []
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Packets Error",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Packets drop",
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "refresh": "5s",
+ "schemaVersion": 26,
+ "style": "dark",
+ "tags": [
+ "influxdb",
+ "telegraf"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allFormat": "glob",
+ "current": {
+ "selected": false,
+ "text": "InfluxDB",
+ "value": "InfluxDB"
+ },
+ "datasource": "InfluxDB-Telegraf",
+ "error": null,
+ "hide": 0,
+ "includeAll": false,
+ "label": "",
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "influxdb",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": true,
+ "tags": [],
+ "text": [
+ "ip-172-31-33-65"
+ ],
+ "value": [
+ "ip-172-31-33-65"
+ ]
+ },
+ "datasource": "InfluxDB-Telegraf",
+ "definition": "",
+ "error": null,
+ "hide": 0,
+ "includeAll": false,
+ "label": "Server",
+ "multi": true,
+ "name": "server",
+ "options": [],
+ "query": "SHOW TAG VALUES FROM system WITH KEY=host",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "auto": true,
+ "auto_count": 100,
+ "auto_min": "30s",
+ "current": {
+ "selected": false,
+ "text": "auto",
+ "value": "$__auto_interval_inter"
+ },
+ "datasource": null,
+ "error": null,
+ "hide": 0,
+ "includeAll": false,
+ "label": "Interval",
+ "multi": false,
+ "name": "inter",
+ "options": [
+ {
+ "selected": true,
+ "text": "auto",
+ "value": "$__auto_interval_inter"
+ },
+ {
+ "selected": false,
+ "text": "1s",
+ "value": "1s"
+ },
+ {
+ "selected": false,
+ "text": "5s",
+ "value": "5s"
+ },
+ {
+ "selected": false,
+ "text": "10s",
+ "value": "10s"
+ },
+ {
+ "selected": false,
+ "text": "15s",
+ "value": "15s"
+ },
+ {
+ "selected": false,
+ "text": "30s",
+ "value": "30s"
+ },
+ {
+ "selected": false,
+ "text": "1m",
+ "value": "1m"
+ },
+ {
+ "selected": false,
+ "text": "10m",
+ "value": "10m"
+ },
+ {
+ "selected": false,
+ "text": "30m",
+ "value": "30m"
+ },
+ {
+ "selected": false,
+ "text": "1h",
+ "value": "1h"
+ },
+ {
+ "selected": false,
+ "text": "6h",
+ "value": "6h"
+ },
+ {
+ "selected": false,
+ "text": "12h",
+ "value": "12h"
+ },
+ {
+ "selected": false,
+ "text": "1d",
+ "value": "1d"
+ },
+ {
+ "selected": false,
+ "text": "7d",
+ "value": "7d"
+ },
+ {
+ "selected": false,
+ "text": "14d",
+ "value": "14d"
+ },
+ {
+ "selected": false,
+ "text": "30d",
+ "value": "30d"
+ },
+ {
+ "selected": false,
+ "text": "60d",
+ "value": "60d"
+ },
+ {
+ "selected": false,
+ "text": "90d",
+ "value": "90d"
+ }
+ ],
+ "query": "1s,5s,10s,15s,30s,1m,10m,30m,1h,6h,12h,1d,7d,14d,30d,60d,90d",
+ "refresh": 2,
+ "skipUrlSync": false,
+ "type": "interval"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "$datasource",
+ "definition": "",
+ "error": null,
+ "hide": 0,
+ "includeAll": true,
+ "label": "CPU",
+ "multi": true,
+ "name": "cpu",
+ "options": [],
+ "query": "SHOW TAG VALUES FROM \"cpu\" WITH KEY = \"cpu\" WHERE host =~ /$server/",
+ "refresh": 1,
+ "regex": "^cpu[0-9].*",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "$datasource",
+ "definition": "",
+ "error": null,
+ "hide": 0,
+ "includeAll": true,
+ "label": "disk",
+ "multi": true,
+ "name": "disk",
+ "options": [],
+ "query": "SHOW TAG VALUES FROM \"disk\" WITH KEY = \"device\"",
+ "refresh": 1,
+ "regex": "/[a-z]d[\\D]$/",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": true
+ },
+ {
+ "allValue": null,
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": "$datasource",
+ "definition": "",
+ "error": null,
+ "hide": 0,
+ "includeAll": true,
+ "label": "interface",
+ "multi": true,
+ "name": "interface",
+ "options": [],
+ "query": "SHOW TAG VALUES FROM \"net\" WITH KEY = \"interface\" WHERE host =~ /$server/",
+ "refresh": 1,
+ "regex": "^(?!.*veth|all|tap).*",
+ "skipUrlSync": false,
+ "sort": 1,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": true
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "Telegraf - system metrics",
+ "uid": "zBFM0ohGz",
+ "version": 1
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/grafana-datasource.yml b/packer/jambonz-mini/proxmox/files/grafana-datasource.yml
new file mode 100644
index 0000000..e3d21b4
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/grafana-datasource.yml
@@ -0,0 +1,34 @@
+# config file version
+apiVersion: 1
+
+# list of datasources that should be deleted from the database
+deleteDatasources:
+ - name: InfluxDB
+ orgId: 1
+ - name: InfluxDB-Telegraf
+ orgId: 1
+
+# list of datasources to insert/update depending
+# whats available in the database
+datasources:
+- name: InfluxDB
+ type: influxdb
+ access: proxy
+ database: homer
+ user: grafana
+ url: http://127.0.0.1:8086
+ jsonData:
+ timeInterval: "15s"
+ # allow users to edit datasources from the UI.
+ editable: true
+
+- name: InfluxDB-Telegraf
+ type: influxdb
+ access: proxy
+ database: telegraf
+ user: grafana
+ url: http://127.0.0.1:8086
+ jsonData:
+ timeInterval: "15s"
+ # allow users to edit datasources from the UI.
+ editable: true
diff --git a/packer/jambonz-mini/proxmox/files/initialize-webapp-userdata.sh b/packer/jambonz-mini/proxmox/files/initialize-webapp-userdata.sh
new file mode 100644
index 0000000..0dc482d
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/initialize-webapp-userdata.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+PRIVATE_IPV4="$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)"
+PUBLIC_IPV4="$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)"
+echo "REACT_APP_API_BASE_URL=http://${PUBLIC_IPV4}/api/v1" > /home/admin/apps/jambonz-webapp/.env
+cd /home/admin/apps/jambonz-webapp && sudo npm install --unsafe-perm && npm run build
+
+# update ecosystem.config.js with private ip
+sudo sed -i -e "s/\(.*\)PRIVATE_IP\(.*\)/\1${PRIVATE_IPV4}\2/g" /home/admin/apps/ecosystem.config.js
+sudo -u admin bash -c "pm2 restart /home/admin/apps/ecosystem.config.js"
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/jaeger.service b/packer/jambonz-mini/proxmox/files/jaeger.service
new file mode 100644
index 0000000..cd45cfc
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/jaeger.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=jaeger service unit file.
+After=syslog.target network.target local-fs.target
+
+[Service]
+Type=exec
+ExecStart=/usr/local/bin/jaeger-all-in-one
+
+[Install]
+WantedBy=multi-user.target
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/jambones-sql.sql b/packer/jambonz-mini/proxmox/files/jambones-sql.sql
new file mode 100644
index 0000000..3f7a2f7
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/jambones-sql.sql
@@ -0,0 +1,271 @@
+/* SQLEditor (MySQL (2))*/
+
+SET FOREIGN_KEY_CHECKS=0;
+
+DROP TABLE IF EXISTS call_routes;
+
+DROP TABLE IF EXISTS lcr_carrier_set_entry;
+
+DROP TABLE IF EXISTS lcr_routes;
+
+DROP TABLE IF EXISTS api_keys;
+
+DROP TABLE IF EXISTS ms_teams_tenants;
+
+DROP TABLE IF EXISTS sbc_addresses;
+
+DROP TABLE IF EXISTS users;
+
+DROP TABLE IF EXISTS phone_numbers;
+
+DROP TABLE IF EXISTS sip_gateways;
+
+DROP TABLE IF EXISTS voip_carriers;
+
+DROP TABLE IF EXISTS accounts;
+
+DROP TABLE IF EXISTS applications;
+
+DROP TABLE IF EXISTS service_providers;
+
+DROP TABLE IF EXISTS webhooks;
+
+CREATE TABLE call_routes
+(
+call_route_sid CHAR(36) NOT NULL UNIQUE ,
+priority INTEGER NOT NULL,
+account_sid CHAR(36) NOT NULL,
+regex VARCHAR(255) NOT NULL,
+application_sid CHAR(36) NOT NULL,
+PRIMARY KEY (call_route_sid)
+) COMMENT='a regex-based pattern match for call routing';
+
+CREATE TABLE lcr_routes
+(
+lcr_route_sid CHAR(36),
+regex VARCHAR(32) NOT NULL COMMENT 'regex-based pattern match against dialed number, used for LCR routing of PSTN calls',
+description VARCHAR(1024),
+priority INTEGER NOT NULL UNIQUE COMMENT 'lower priority routes are attempted first',
+PRIMARY KEY (lcr_route_sid)
+) COMMENT='Least cost routing table';
+
+CREATE TABLE api_keys
+(
+api_key_sid CHAR(36) NOT NULL UNIQUE ,
+token CHAR(36) NOT NULL UNIQUE ,
+account_sid CHAR(36),
+service_provider_sid CHAR(36),
+expires_at TIMESTAMP NULL DEFAULT NULL,
+last_used TIMESTAMP NULL DEFAULT NULL,
+created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+PRIMARY KEY (api_key_sid)
+) COMMENT='An authorization token that is used to access the REST api';
+
+CREATE TABLE ms_teams_tenants
+(
+ms_teams_tenant_sid CHAR(36) NOT NULL UNIQUE ,
+service_provider_sid CHAR(36) NOT NULL,
+account_sid CHAR(36) NOT NULL,
+application_sid CHAR(36),
+tenant_fqdn VARCHAR(255) NOT NULL UNIQUE ,
+PRIMARY KEY (ms_teams_tenant_sid)
+) COMMENT='A Microsoft Teams customer tenant';
+
+CREATE TABLE sbc_addresses
+(
+sbc_address_sid CHAR(36) NOT NULL UNIQUE ,
+ipv4 VARCHAR(255) NOT NULL,
+port INTEGER NOT NULL DEFAULT 5060,
+service_provider_sid CHAR(36),
+PRIMARY KEY (sbc_address_sid)
+);
+
+CREATE TABLE users
+(
+user_sid CHAR(36) NOT NULL UNIQUE ,
+name CHAR(36) NOT NULL UNIQUE ,
+hashed_password VARCHAR(1024) NOT NULL,
+salt CHAR(16) NOT NULL,
+force_change BOOLEAN NOT NULL DEFAULT TRUE,
+PRIMARY KEY (user_sid)
+);
+
+CREATE TABLE voip_carriers
+(
+voip_carrier_sid CHAR(36) NOT NULL UNIQUE ,
+name VARCHAR(64) NOT NULL UNIQUE ,
+description VARCHAR(255),
+account_sid CHAR(36) COMMENT 'if provided, indicates this entity represents a customer PBX that is associated with a specific account',
+application_sid CHAR(36) COMMENT 'If provided, all incoming calls from this source will be routed to the associated application',
+e164_leading_plus BOOLEAN NOT NULL DEFAULT false,
+requires_register BOOLEAN NOT NULL DEFAULT false,
+register_username VARCHAR(64),
+register_sip_realm VARCHAR(64),
+register_password VARCHAR(64),
+tech_prefix VARCHAR(16),
+diversion VARCHAR(32),
+is_active BOOLEAN NOT NULL DEFAULT true,
+PRIMARY KEY (voip_carrier_sid)
+) COMMENT='A Carrier or customer PBX that can send or receive calls';
+
+CREATE TABLE phone_numbers
+(
+phone_number_sid CHAR(36) UNIQUE ,
+number VARCHAR(32) NOT NULL UNIQUE ,
+voip_carrier_sid CHAR(36) NOT NULL,
+account_sid CHAR(36),
+application_sid CHAR(36),
+PRIMARY KEY (phone_number_sid)
+) ENGINE=InnoDB COMMENT='A phone number that has been assigned to an account';
+
+CREATE TABLE webhooks
+(
+webhook_sid CHAR(36) NOT NULL UNIQUE ,
+url VARCHAR(1024) NOT NULL,
+method ENUM("GET","POST") NOT NULL DEFAULT 'POST',
+username VARCHAR(255),
+password VARCHAR(255),
+PRIMARY KEY (webhook_sid)
+) COMMENT='An HTTP callback';
+
+CREATE TABLE sip_gateways
+(
+sip_gateway_sid CHAR(36),
+ipv4 VARCHAR(128) NOT NULL COMMENT 'ip address or DNS name of the gateway. For gateways providing inbound calling service, ip address is required.',
+port INTEGER NOT NULL DEFAULT 5060 COMMENT 'sip signaling port',
+inbound BOOLEAN NOT NULL COMMENT 'if true, whitelist this IP to allow inbound calls from the gateway',
+outbound BOOLEAN NOT NULL COMMENT 'if true, include in least-cost routing when placing calls to the PSTN',
+voip_carrier_sid CHAR(36) NOT NULL,
+is_active BOOLEAN NOT NULL DEFAULT 1,
+PRIMARY KEY (sip_gateway_sid)
+) COMMENT='A whitelisted sip gateway used for origination/termination';
+
+CREATE TABLE lcr_carrier_set_entry
+(
+lcr_carrier_set_entry_sid CHAR(36),
+workload INTEGER NOT NULL DEFAULT 1 COMMENT 'represents a proportion of traffic to send through the associated carrier; can be used for load balancing traffic across carriers with a common priority for a destination',
+lcr_route_sid CHAR(36) NOT NULL,
+voip_carrier_sid CHAR(36) NOT NULL,
+priority INTEGER NOT NULL DEFAULT 0 COMMENT 'lower priority carriers are attempted first',
+PRIMARY KEY (lcr_carrier_set_entry_sid)
+) COMMENT='An entry in the LCR routing list';
+
+CREATE TABLE applications
+(
+application_sid CHAR(36) NOT NULL UNIQUE ,
+name VARCHAR(64) NOT NULL,
+account_sid CHAR(36) NOT NULL COMMENT 'account that this application belongs to',
+call_hook_sid CHAR(36) COMMENT 'webhook to call for inbound calls ',
+call_status_hook_sid CHAR(36) COMMENT 'webhook to call for call status events',
+messaging_hook_sid CHAR(36) COMMENT 'webhook to call for inbound SMS/MMS ',
+speech_synthesis_vendor VARCHAR(64) NOT NULL DEFAULT 'google',
+speech_synthesis_language VARCHAR(12) NOT NULL DEFAULT 'en-US',
+speech_synthesis_voice VARCHAR(64),
+speech_recognizer_vendor VARCHAR(64) NOT NULL DEFAULT 'google',
+speech_recognizer_language VARCHAR(64) NOT NULL DEFAULT 'en-US',
+PRIMARY KEY (application_sid)
+) COMMENT='A defined set of behaviors to be applied to phone calls ';
+
+CREATE TABLE service_providers
+(
+service_provider_sid CHAR(36) NOT NULL UNIQUE ,
+name VARCHAR(64) NOT NULL UNIQUE ,
+description VARCHAR(255),
+root_domain VARCHAR(128) UNIQUE ,
+registration_hook_sid CHAR(36),
+ms_teams_fqdn VARCHAR(255),
+PRIMARY KEY (service_provider_sid)
+) COMMENT='A partition of the platform used by one service provider';
+
+CREATE TABLE accounts
+(
+account_sid CHAR(36) NOT NULL UNIQUE ,
+name VARCHAR(64) NOT NULL,
+sip_realm VARCHAR(132) UNIQUE COMMENT 'sip domain that will be used for devices registering under this account',
+service_provider_sid CHAR(36) NOT NULL COMMENT 'service provider that owns the customer relationship with this account',
+registration_hook_sid CHAR(36) COMMENT 'webhook to call when devices under this account attempt to register',
+device_calling_application_sid CHAR(36) COMMENT 'application to use for outbound calling from an account',
+is_active BOOLEAN NOT NULL DEFAULT true,
+webhook_secret VARCHAR(36),
+disable_cdrs BOOLEAN NOT NULL DEFAULT 0,
+PRIMARY KEY (account_sid)
+) COMMENT='An enterprise that uses the platform for comm services';
+
+CREATE INDEX call_route_sid_idx ON call_routes (call_route_sid);
+ALTER TABLE call_routes ADD FOREIGN KEY account_sid_idxfk (account_sid) REFERENCES accounts (account_sid);
+
+ALTER TABLE call_routes ADD FOREIGN KEY application_sid_idxfk (application_sid) REFERENCES applications (application_sid);
+
+CREATE INDEX api_key_sid_idx ON api_keys (api_key_sid);
+CREATE INDEX account_sid_idx ON api_keys (account_sid);
+ALTER TABLE api_keys ADD FOREIGN KEY account_sid_idxfk_1 (account_sid) REFERENCES accounts (account_sid);
+
+CREATE INDEX service_provider_sid_idx ON api_keys (service_provider_sid);
+ALTER TABLE api_keys ADD FOREIGN KEY service_provider_sid_idxfk (service_provider_sid) REFERENCES service_providers (service_provider_sid);
+
+CREATE INDEX ms_teams_tenant_sid_idx ON ms_teams_tenants (ms_teams_tenant_sid);
+ALTER TABLE ms_teams_tenants ADD FOREIGN KEY service_provider_sid_idxfk_1 (service_provider_sid) REFERENCES service_providers (service_provider_sid);
+
+ALTER TABLE ms_teams_tenants ADD FOREIGN KEY account_sid_idxfk_2 (account_sid) REFERENCES accounts (account_sid);
+
+ALTER TABLE ms_teams_tenants ADD FOREIGN KEY application_sid_idxfk_1 (application_sid) REFERENCES applications (application_sid);
+
+CREATE INDEX tenant_fqdn_idx ON ms_teams_tenants (tenant_fqdn);
+CREATE INDEX sbc_addresses_idx_host_port ON sbc_addresses (ipv4,port);
+
+CREATE INDEX sbc_address_sid_idx ON sbc_addresses (sbc_address_sid);
+CREATE INDEX service_provider_sid_idx ON sbc_addresses (service_provider_sid);
+ALTER TABLE sbc_addresses ADD FOREIGN KEY service_provider_sid_idxfk_2 (service_provider_sid) REFERENCES service_providers (service_provider_sid);
+
+CREATE INDEX user_sid_idx ON users (user_sid);
+CREATE INDEX name_idx ON users (name);
+CREATE INDEX voip_carrier_sid_idx ON voip_carriers (voip_carrier_sid);
+CREATE INDEX name_idx ON voip_carriers (name);
+ALTER TABLE voip_carriers ADD FOREIGN KEY account_sid_idxfk_3 (account_sid) REFERENCES accounts (account_sid);
+
+ALTER TABLE voip_carriers ADD FOREIGN KEY application_sid_idxfk_2 (application_sid) REFERENCES applications (application_sid);
+
+CREATE INDEX phone_number_sid_idx ON phone_numbers (phone_number_sid);
+CREATE INDEX voip_carrier_sid_idx ON phone_numbers (voip_carrier_sid);
+ALTER TABLE phone_numbers ADD FOREIGN KEY voip_carrier_sid_idxfk (voip_carrier_sid) REFERENCES voip_carriers (voip_carrier_sid);
+
+ALTER TABLE phone_numbers ADD FOREIGN KEY account_sid_idxfk_4 (account_sid) REFERENCES accounts (account_sid);
+
+ALTER TABLE phone_numbers ADD FOREIGN KEY application_sid_idxfk_3 (application_sid) REFERENCES applications (application_sid);
+
+CREATE INDEX webhook_sid_idx ON webhooks (webhook_sid);
+CREATE UNIQUE INDEX sip_gateway_idx_hostport ON sip_gateways (ipv4,port);
+
+ALTER TABLE sip_gateways ADD FOREIGN KEY voip_carrier_sid_idxfk_1 (voip_carrier_sid) REFERENCES voip_carriers (voip_carrier_sid);
+
+ALTER TABLE lcr_carrier_set_entry ADD FOREIGN KEY lcr_route_sid_idxfk (lcr_route_sid) REFERENCES lcr_routes (lcr_route_sid);
+
+ALTER TABLE lcr_carrier_set_entry ADD FOREIGN KEY voip_carrier_sid_idxfk_2 (voip_carrier_sid) REFERENCES voip_carriers (voip_carrier_sid);
+
+CREATE UNIQUE INDEX applications_idx_name ON applications (account_sid,name);
+
+CREATE INDEX application_sid_idx ON applications (application_sid);
+CREATE INDEX account_sid_idx ON applications (account_sid);
+ALTER TABLE applications ADD FOREIGN KEY account_sid_idxfk_5 (account_sid) REFERENCES accounts (account_sid);
+
+ALTER TABLE applications ADD FOREIGN KEY call_hook_sid_idxfk (call_hook_sid) REFERENCES webhooks (webhook_sid);
+
+ALTER TABLE applications ADD FOREIGN KEY call_status_hook_sid_idxfk (call_status_hook_sid) REFERENCES webhooks (webhook_sid);
+
+ALTER TABLE applications ADD FOREIGN KEY messaging_hook_sid_idxfk (messaging_hook_sid) REFERENCES webhooks (webhook_sid);
+
+CREATE INDEX service_provider_sid_idx ON service_providers (service_provider_sid);
+CREATE INDEX name_idx ON service_providers (name);
+CREATE INDEX root_domain_idx ON service_providers (root_domain);
+ALTER TABLE service_providers ADD FOREIGN KEY registration_hook_sid_idxfk (registration_hook_sid) REFERENCES webhooks (webhook_sid);
+
+CREATE INDEX account_sid_idx ON accounts (account_sid);
+CREATE INDEX sip_realm_idx ON accounts (sip_realm);
+CREATE INDEX service_provider_sid_idx ON accounts (service_provider_sid);
+ALTER TABLE accounts ADD FOREIGN KEY service_provider_sid_idxfk_3 (service_provider_sid) REFERENCES service_providers (service_provider_sid);
+
+ALTER TABLE accounts ADD FOREIGN KEY registration_hook_sid_idxfk_1 (registration_hook_sid) REFERENCES webhooks (webhook_sid);
+
+ALTER TABLE accounts ADD FOREIGN KEY device_calling_application_sid_idxfk (device_calling_application_sid) REFERENCES applications (application_sid);
+
+SET FOREIGN_KEY_CHECKS=1;
diff --git a/packer/jambonz-mini/proxmox/files/mod_avmd.c.patch b/packer/jambonz-mini/proxmox/files/mod_avmd.c.patch
new file mode 100644
index 0000000..ae995b9
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/mod_avmd.c.patch
@@ -0,0 +1,25 @@
+--- mod_avmd.c 2022-02-10 11:19:05
++++ mod_avmd.c.new 2023-04-19 13:28:03
+@@ -1476,15 +1476,20 @@
+ }
+ if ((SWITCH_CALL_DIRECTION_OUTBOUND == switch_channel_direction(channel)) && (avmd_session->settings.outbound_channnel == 1)) {
+ flags |= SMBF_READ_REPLACE;
+- direction = "READ_REPLACE";
++ direction = "READ_REPLACE";
+ }
+- if ((SWITCH_CALL_DIRECTION_INBOUND == switch_channel_direction(channel)) && (avmd_session->settings.inbound_channnel == 1)) {
++ if ((SWITCH_CALL_DIRECTION_INBOUND == switch_channel_direction(channel)) /* && (avmd_session->settings.inbound_channnel == 1) */) {
++ /* DCH: for drachtio-fsmrf */
++ flags |= SMBF_READ_REPLACE;
++ direction = "READ_REPLACE";
++/*
+ flags |= SMBF_WRITE_REPLACE;
+ if (!strcmp(direction, "READ_REPLACE")) {
+ direction = "READ_REPLACE | WRITE_REPLACE";
+ } else {
+ direction = "WRITE_REPLACE";
+ }
++*/
+ }
+
+ if (flags == 0) {
diff --git a/packer/jambonz-mini/proxmox/files/mod_httapi.c.patch b/packer/jambonz-mini/proxmox/files/mod_httapi.c.patch
new file mode 100644
index 0000000..eab370d
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/mod_httapi.c.patch
@@ -0,0 +1,55 @@
+--- mod_httapi.c 2023-03-01 13:57:28
++++ mod_httapi.c.new 2023-03-01 14:19:34
+@@ -2472,6 +2472,12 @@
+ char *ua = NULL;
+ const char *profile_name = NULL;
+ int tries = 10;
++ int awsSignedUrl = strstr(url, "X-Amz-Signature") != NULL &&
++ strstr(url, "X-Amz-Algorithm") != NULL &&
++ strstr(url, "X-Amz-Credential") != NULL &&
++ strstr(url, "X-Amz-Date") != NULL &&
++ strstr(url, "X-Amz-Expires") != NULL &&
++ strstr(url, "X-Amz-SignedHeaders") != NULL;
+
+ if (context->url_params) {
+ profile_name = switch_event_get_header(context->url_params, "profile_name");
+@@ -2614,7 +2620,7 @@
+ switch_curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, (void *) client);
+ } else {
+ switch_curl_easy_setopt(curl_handle, CURLOPT_HEADER, 1);
+- switch_curl_easy_setopt(curl_handle, CURLOPT_NOBODY, 1);
++ if (!awsSignedUrl) switch_curl_easy_setopt(curl_handle, CURLOPT_NOBODY, 1);
+ }
+
+ if (headers) {
+@@ -2783,6 +2789,12 @@
+ char *metadata;
+ const char *ext = NULL;
+ const char *err_msg = NULL;
++ int awsSignedUrl = strstr(url, "X-Amz-Signature") != NULL &&
++ strstr(url, "X-Amz-Algorithm") != NULL &&
++ strstr(url, "X-Amz-Credential") != NULL &&
++ strstr(url, "X-Amz-Date") != NULL &&
++ strstr(url, "X-Amz-Expires") != NULL &&
++ strstr(url, "X-Amz-SignedHeaders") != NULL;
+
+ load_cache_data(context, url);
+
+@@ -2831,7 +2843,7 @@
+
+ if (!unreachable && !zstr(context->metadata)) {
+ metadata = switch_core_sprintf(context->pool, "%s:%s:%s:%s:%s",
+- url,
++ awsSignedUrl ? context->cache_file : url,
+ switch_event_get_header_nil(headers, "last-modified"),
+ switch_event_get_header_nil(headers, "etag"),
+ switch_event_get_header_nil(headers, "content-length"),
+@@ -2855,7 +2867,7 @@
+
+
+ metadata = switch_core_sprintf(context->pool, "%s:%s:%s:%s:%s",
+- url,
++ awsSignedUrl ? context->cache_file : url,
+ switch_event_get_header_nil(headers, "last-modified"),
+ switch_event_get_header_nil(headers, "etag"),
+ switch_event_get_header_nil(headers, "content-length"),
diff --git a/packer/jambonz-mini/proxmox/files/mod_opusfile.c.patch b/packer/jambonz-mini/proxmox/files/mod_opusfile.c.patch
new file mode 100644
index 0000000..08d43cb
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/mod_opusfile.c.patch
@@ -0,0 +1,13 @@
+--- mod_opusfile.c 2019-09-25 08:55:37.000000000 -0400
++++ mod_opusfile.c.new 2020-01-02 10:24:57.000000000 -0500
+@@ -282,7 +282,9 @@
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "[OGG/OPUS File] Channels: %i\n", head->channel_count);
+ if (head->input_sample_rate) {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "[OGG/OPUS File] Original sampling rate: %lu Hz\n", (unsigned long)head->input_sample_rate);
+- handle->samplerate = context->samplerate = head->input_sample_rate;
++ // DH: per https://github.com/xiph/opusfile/blob/d2577d7fdfda04bc32a853e80e62d6faa2a20859/include/opusfile.h#L56
++ // the API always decodes to 48kHz, and we should not be telling freeswitch to play out the originally recorded sample rate
++ // handle->samplerate = context->samplerate = head->input_sample_rate;
+ }
+ }
+ if (op_seekable(context->of)) {
diff --git a/packer/jambonz-mini/proxmox/files/modules.conf.in.extra b/packer/jambonz-mini/proxmox/files/modules.conf.in.extra
new file mode 100644
index 0000000..081860d
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/modules.conf.in.extra
@@ -0,0 +1,187 @@
+applications/mod_audio_fork
+applications/mod_aws_lex
+applications/mod_aws_transcribe
+applications/mod_azure_transcribe
+applications/mod_deepgram_transcribe
+applications/mod_google_tts
+applications/mod_google_transcribe
+applications/mod_ibm_transcribe
+applications/mod_jambonz_transcribe
+applications/mod_nuance_transcribe
+applications/mod_nvidia_transcribe
+applications/mod_soniox_transcribe
+applications/mod_dialogflow
+#applications/mod_abstraction
+#applications/mod_av
+applications/mod_avmd
+#applications/mod_bert
+#applications/mod_blacklist
+#applications/mod_callcenter
+#applications/mod_cidlookup
+#applications/mod_cluechoo
+applications/mod_commands
+applications/mod_conference
+#applications/mod_curl
+#applications/mod_cv
+#applications/mod_db
+#applications/mod_directory
+#applications/mod_distributor
+applications/mod_dptools
+#applications/mod_easyroute
+#applications/mod_enum
+#applications/mod_esf
+#applications/mod_esl
+#applications/mod_expr
+#applications/mod_fifo
+#applications/mod_fsk
+#applications/mod_fsv
+#applications/mod_hash
+#applications/mod_hiredis
+applications/mod_httapi
+#applications/mod_http_cache
+#applications/mod_ladspa
+#applications/mod_lcr
+#applications/mod_memcache
+#applications/mod_mongo
+#applications/mod_mp4
+#applications/mod_mp4v2
+#applications/mod_nibblebill
+#applications/mod_oreka
+#applications/mod_osp
+#applications/mod_prefix
+#applications/mod_rad_auth
+#applications/mod_redis
+#applications/mod_rss
+#applications/mod_signalwire
+#applications/mod_sms
+#applications/mod_sms_flowroute
+#applications/mod_snapshot
+#applications/mod_snom
+#applications/mod_sonar
+#applications/mod_soundtouch
+applications/mod_spandsp
+#applications/mod_spy
+#applications/mod_stress
+#applications/mod_translate
+#applications/mod_valet_parking
+#applications/mod_video_filter
+#applications/mod_vmd
+#applications/mod_voicemail
+#applications/mod_voicemail_ivr
+#asr_tts/mod_cepstral
+#asr_tts/mod_flite
+#asr_tts/mod_pocketsphinx
+#asr_tts/mod_tts_commandline
+#asr_tts/mod_unimrcp
+codecs/mod_amr
+#codecs/mod_amrwb
+#codecs/mod_b64
+#codecs/mod_bv
+#codecs/mod_clearmode
+#codecs/mod_codec2
+#codecs/mod_com_g729
+#codecs/mod_dahdi_codec
+codecs/mod_g723_1
+codecs/mod_g729
+codecs/mod_h26x
+#codecs/mod_ilbc
+#codecs/mod_isac
+#codecs/mod_mp4v
+codecs/mod_opus
+#codecs/mod_sangoma_codec
+#codecs/mod_silk
+#codecs/mod_siren
+#codecs/mod_theora
+#databases/mod_mariadb
+#databases/mod_pgsql
+#dialplans/mod_dialplan_asterisk
+#dialplans/mod_dialplan_directory
+dialplans/mod_dialplan_xml
+#directories/mod_ldap
+#endpoints/mod_alsa
+#endpoints/mod_dingaling
+#endpoints/mod_gsmopen
+#endpoints/mod_h323
+#endpoints/mod_khomp
+#endpoints/mod_loopback
+#endpoints/mod_opal
+#endpoints/mod_portaudio
+endpoints/mod_rtc
+#endpoints/mod_rtmp
+#endpoints/mod_skinny
+endpoints/mod_sofia
+#endpoints/mod_verto
+#event_handlers/mod_amqp
+event_handlers/mod_cdr_csv
+#event_handlers/mod_cdr_mongodb
+#event_handlers/mod_cdr_pg_csv
+#event_handlers/mod_cdr_sqlite
+#event_handlers/mod_erlang_event
+#event_handlers/mod_event_multicast
+event_handlers/mod_event_socket
+#event_handlers/mod_fail2ban
+#event_handlers/mod_format_cdr
+#event_handlers/mod_json_cdr
+#event_handlers/mod_radius_cdr
+#event_handlers/mod_odbc_cdr
+#event_handlers/mod_kazoo
+#event_handlers/mod_rayo
+#event_handlers/mod_smpp
+#event_handlers/mod_snmp
+#event_handlers/mod_event_zmq
+#formats/mod_imagick
+formats/mod_local_stream
+formats/mod_native_file
+#formats/mod_png
+#formats/mod_portaudio_stream
+#formats/mod_shell_stream
+formats/mod_shout
+formats/mod_sndfile
+#formats/mod_ssml
+formats/mod_tone_stream
+#formats/mod_vlc
+formats/mod_opusfile
+#languages/mod_basic
+#languages/mod_java
+#languages/mod_lua
+#languages/mod_managed
+#languages/mod_perl
+#languages/mod_python
+#languages/mod_v8
+#languages/mod_yaml
+loggers/mod_console
+#loggers/mod_graylog2
+loggers/mod_logfile
+loggers/mod_syslog
+#loggers/mod_raven
+#say/mod_say_de
+say/mod_say_en
+#say/mod_say_es
+#say/mod_say_es_ar
+#say/mod_say_fa
+#say/mod_say_fr
+#say/mod_say_he
+#say/mod_say_hr
+#say/mod_say_hu
+#say/mod_say_it
+#say/mod_say_ja
+#say/mod_say_nl
+#say/mod_say_pl
+#say/mod_say_pt
+#say/mod_say_ru
+#say/mod_say_sv
+#say/mod_say_th
+#say/mod_say_zh
+#timers/mod_posix_timer
+#timers/mod_timerfd
+xml_int/mod_xml_cdr
+#xml_int/mod_xml_curl
+#xml_int/mod_xml_ldap
+#xml_int/mod_xml_radius
+#xml_int/mod_xml_rpc
+#xml_int/mod_xml_scgi
+
+#../../libs/freetdm/mod_freetdm
+
+## Experimental Modules (don't cry if they're broken)
+#../../contrib/mod/xml_int/mod_xml_odbc
diff --git a/packer/jambonz-mini/proxmox/files/modules.conf.in.grpc.patch b/packer/jambonz-mini/proxmox/files/modules.conf.in.grpc.patch
new file mode 100644
index 0000000..4d43e27
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/modules.conf.in.grpc.patch
@@ -0,0 +1,10 @@
+--- modules.conf.in 2019-10-23 15:09:23.114079884 +0000
++++ modules.conf.in.new 2019-10-23 15:10:08.330364591 +0000
+@@ -1,4 +1,7 @@
+ applications/mod_audio_fork
++applications/mod_google_tts
++applications/mod_google_transcribe
++applications/mod_dialogflow
+ #applications/mod_abstraction
+ applications/mod_av
+ #applications/mod_avmd
diff --git a/packer/jambonz-mini/proxmox/files/modules.conf.in.patch b/packer/jambonz-mini/proxmox/files/modules.conf.in.patch
new file mode 100644
index 0000000..a470b5c
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/modules.conf.in.patch
@@ -0,0 +1,139 @@
+--- modules.conf.in 2019-09-25 08:55:34.000000000 -0400
++++ modules.conf.in.new 2020-01-02 10:36:07.000000000 -0500
+@@ -1,5 +1,6 @@
++applications/mod_audio_fork
+ #applications/mod_abstraction
+-applications/mod_av
++#applications/mod_av
+ #applications/mod_avmd
+ #applications/mod_bert
+ #applications/mod_blacklist
+@@ -10,19 +11,19 @@
+ applications/mod_conference
+ #applications/mod_curl
+ #applications/mod_cv
+-applications/mod_db
++#applications/mod_db
+ #applications/mod_directory
+ #applications/mod_distributor
+ applications/mod_dptools
+ #applications/mod_easyroute
+-applications/mod_enum
+-applications/mod_esf
++#applications/mod_enum
++#applications/mod_esf
+ #applications/mod_esl
+-applications/mod_expr
+-applications/mod_fifo
++#applications/mod_expr
++#applications/mod_fifo
+ #applications/mod_fsk
+-applications/mod_fsv
+-applications/mod_hash
++#applications/mod_fsv
++#applications/mod_hash
+ #applications/mod_hiredis
+ applications/mod_httapi
+ #applications/mod_http_cache
+@@ -39,8 +40,8 @@
+ #applications/mod_rad_auth
+ #applications/mod_redis
+ #applications/mod_rss
+-applications/mod_signalwire
+-applications/mod_sms
++#applications/mod_signalwire
++#applications/mod_sms
+ #applications/mod_sms_flowroute
+ #applications/mod_snapshot
+ #applications/mod_snom
+@@ -50,10 +51,10 @@
+ #applications/mod_spy
+ #applications/mod_stress
+ #applications/mod_translate
+-applications/mod_valet_parking
++#applications/mod_valet_parking
+ #applications/mod_video_filter
+ #applications/mod_vmd
+-applications/mod_voicemail
++#applications/mod_voicemail
+ #applications/mod_voicemail_ivr
+ #asr_tts/mod_cepstral
+ #asr_tts/mod_flite
+@@ -62,7 +63,7 @@
+ #asr_tts/mod_unimrcp
+ codecs/mod_amr
+ #codecs/mod_amrwb
+-codecs/mod_b64
++#codecs/mod_b64
+ #codecs/mod_bv
+ #codecs/mod_clearmode
+ #codecs/mod_codec2
+@@ -80,8 +81,8 @@
+ #codecs/mod_siren
+ #codecs/mod_theora
+ #databases/mod_mariadb
+-databases/mod_pgsql
+-dialplans/mod_dialplan_asterisk
++#databases/mod_pgsql
++#dialplans/mod_dialplan_asterisk
+ #dialplans/mod_dialplan_directory
+ dialplans/mod_dialplan_xml
+ #directories/mod_ldap
+@@ -90,19 +91,19 @@
+ #endpoints/mod_gsmopen
+ #endpoints/mod_h323
+ #endpoints/mod_khomp
+-endpoints/mod_loopback
++#endpoints/mod_loopback
+ #endpoints/mod_opal
+ #endpoints/mod_portaudio
+ endpoints/mod_rtc
+ #endpoints/mod_rtmp
+-endpoints/mod_skinny
++#endpoints/mod_skinny
+ endpoints/mod_sofia
+-endpoints/mod_verto
++#endpoints/mod_verto
+ #event_handlers/mod_amqp
+ event_handlers/mod_cdr_csv
+ #event_handlers/mod_cdr_mongodb
+ #event_handlers/mod_cdr_pg_csv
+-event_handlers/mod_cdr_sqlite
++#event_handlers/mod_cdr_sqlite
+ #event_handlers/mod_erlang_event
+ #event_handlers/mod_event_multicast
+ event_handlers/mod_event_socket
+@@ -119,18 +120,18 @@
+ #formats/mod_imagick
+ formats/mod_local_stream
+ formats/mod_native_file
+-formats/mod_png
++#formats/mod_png
+ #formats/mod_portaudio_stream
+ #formats/mod_shell_stream
+-#formats/mod_shout
++formats/mod_shout
+ formats/mod_sndfile
+ #formats/mod_ssml
+ formats/mod_tone_stream
+ #formats/mod_vlc
+-#formats/mod_opusfile
++formats/mod_opusfile
+ #languages/mod_basic
+ #languages/mod_java
+-languages/mod_lua
++#languages/mod_lua
+ #languages/mod_managed
+ #languages/mod_perl
+ #languages/mod_python
+@@ -165,8 +166,8 @@
+ #xml_int/mod_xml_curl
+ #xml_int/mod_xml_ldap
+ #xml_int/mod_xml_radius
+-xml_int/mod_xml_rpc
+-xml_int/mod_xml_scgi
++#xml_int/mod_xml_rpc
++#xml_int/mod_xml_scgi
+
+ #../../libs/freetdm/mod_freetdm
+
diff --git a/packer/jambonz-mini/proxmox/files/modules.conf.patch b/packer/jambonz-mini/proxmox/files/modules.conf.patch
new file mode 100644
index 0000000..cc29321
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/modules.conf.patch
@@ -0,0 +1,118 @@
+--- modules.conf 2017-04-25 18:44:46.772490196 +0000
++++ modules.conf.new 2017-04-25 18:47:05.967886830 +0000
+@@ -10,19 +10,19 @@
+ applications/mod_conference
+ #applications/mod_curl
+ #applications/mod_cv
+-applications/mod_db
++#applications/mod_db
+ #applications/mod_directory
+ #applications/mod_distributor
+ applications/mod_dptools
+ #applications/mod_easyroute
+-applications/mod_enum
+-applications/mod_esf
++#applications/mod_enum
++#applications/mod_esf
+ #applications/mod_esl
+ applications/mod_expr
+-applications/mod_fifo
++#applications/mod_fifo
+ #applications/mod_fsk
+-applications/mod_fsv
+-applications/mod_hash
++#applications/mod_fsv
++#applications/mod_hash
+ #applications/mod_hiredis
+ applications/mod_httapi
+ #applications/mod_http_cache
+@@ -39,19 +39,19 @@
+ #applications/mod_rad_auth
+ #applications/mod_redis
+ #applications/mod_rss
+-applications/mod_sms
++#applications/mod_sms
+ #applications/mod_sms_flowroute
+ #applications/mod_snapshot
+ #applications/mod_snom
+ #applications/mod_sonar
+ #applications/mod_soundtouch
+-applications/mod_spandsp
++#applications/mod_spandsp
+ #applications/mod_spy
+ #applications/mod_stress
+ #applications/mod_translate
+-applications/mod_valet_parking
++#applications/mod_valet_parking
+ #applications/mod_vmd
+-applications/mod_voicemail
++#applications/mod_voicemail
+ #applications/mod_voicemail_ivr
+ #asr_tts/mod_cepstral
+ #asr_tts/mod_flite
+@@ -67,7 +67,7 @@
+ #codecs/mod_com_g729
+ #codecs/mod_dahdi_codec
+ codecs/mod_g723_1
+-codecs/mod_g729
++#codecs/mod_g729
+ codecs/mod_h26x
+ #codecs/mod_ilbc
+ #codecs/mod_isac
+@@ -77,7 +77,7 @@
+ #codecs/mod_silk
+ #codecs/mod_siren
+ #codecs/mod_theora
+-dialplans/mod_dialplan_asterisk
++#dialplans/mod_dialplan_asterisk
+ #dialplans/mod_dialplan_directory
+ dialplans/mod_dialplan_xml
+ #directories/mod_ldap
+@@ -89,17 +89,17 @@
+ endpoints/mod_loopback
+ #endpoints/mod_opal
+ #endpoints/mod_portaudio
+-endpoints/mod_rtc
++#endpoints/mod_rtc
+ #endpoints/mod_rtmp
+-endpoints/mod_skinny
++#endpoints/mod_skinny
+ #endpoints/mod_skypopen
+ endpoints/mod_sofia
+-endpoints/mod_verto
++#endpoints/mod_verto
+ #event_handlers/mod_amqp
+-event_handlers/mod_cdr_csv
++#event_handlers/mod_cdr_csv
+ #event_handlers/mod_cdr_mongodb
+ #event_handlers/mod_cdr_pg_csv
+-event_handlers/mod_cdr_sqlite
++#event_handlers/mod_cdr_sqlite
+ #event_handlers/mod_erlang_event
+ #event_handlers/mod_event_multicast
+ event_handlers/mod_event_socket
+@@ -125,7 +125,7 @@
+ #formats/mod_vlc
+ #languages/mod_basic
+ #languages/mod_java
+-languages/mod_lua
++#languages/mod_lua
+ #languages/mod_managed
+ #languages/mod_perl
+ #languages/mod_python
+@@ -155,12 +155,12 @@
+ #say/mod_say_zh
+ #timers/mod_posix_timer
+ #timers/mod_timerfd
+-xml_int/mod_xml_cdr
++#xml_int/mod_xml_cdr
+ #xml_int/mod_xml_curl
+ #xml_int/mod_xml_ldap
+ #xml_int/mod_xml_radius
+-xml_int/mod_xml_rpc
+-xml_int/mod_xml_scgi
++#xml_int/mod_xml_rpc
++#xml_int/mod_xml_scgi
+
+ #../../libs/freetdm/mod_freetdm
+
diff --git a/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.extra b/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.extra
new file mode 100644
index 0000000..4774bf7
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.extra
@@ -0,0 +1,160 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.grpc b/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.grpc
new file mode 100644
index 0000000..beee2ad
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.grpc
@@ -0,0 +1,151 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.grpc.patch b/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.grpc.patch
new file mode 100644
index 0000000..3d5cf68
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.grpc.patch
@@ -0,0 +1,12 @@
+--- modules.conf.xml 2019-05-15 21:08:29.049449029 +0000
++++ modules.conf.xml.new 2019-05-15 22:05:00.303623468 +0000
+@@ -7,6 +7,9 @@
+
+
+
++
++
++
+
+
+
diff --git a/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.lws b/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.lws
new file mode 100644
index 0000000..835d72c
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.lws
@@ -0,0 +1,147 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.patch b/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.patch
new file mode 100644
index 0000000..57aca31
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/modules.conf.vanilla.xml.patch
@@ -0,0 +1,105 @@
+--- modules.conf.xml 2019-09-30 19:01:33.304020805 +0000
++++ modules.conf.xml.new 2019-09-30 23:11:23.371830901 +0000
+@@ -1,5 +1,6 @@
+
+
++
+
+
+
+@@ -10,7 +11,7 @@
+
+
+
+-
++
+
+
+
+@@ -39,7 +40,7 @@
+
+
+
+-
++
+
+
+
+@@ -47,28 +48,28 @@
+
+
+
+-
++
+
+
+-
++
+
+
+
+-
++
+
+-
+-
+-
++
++
++
+
+-
++
+
+
+
+
+-
+-
++
++
+
+-
++
+
+
+
+@@ -87,7 +88,7 @@
+
+
+
+-
++
+
+
+
+@@ -96,17 +97,17 @@
+
+
+
+-
++
+
+
+
+
+
+-
++
+
+
+
+-
++
+
+
+
+@@ -123,7 +124,7 @@
+
+
+
+-
++
+
+
+
diff --git a/packer/jambonz-mini/proxmox/files/modules.conf.xml b/packer/jambonz-mini/proxmox/files/modules.conf.xml
new file mode 100644
index 0000000..b1fc065
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/modules.conf.xml
@@ -0,0 +1,147 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/proxmox/files/mrf_dialplan.xml b/packer/jambonz-mini/proxmox/files/mrf_dialplan.xml
new file mode 100644
index 0000000..eaf85f2
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/mrf_dialplan.xml
@@ -0,0 +1,16 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/mrf_sip_profile.xml b/packer/jambonz-mini/proxmox/files/mrf_sip_profile.xml
new file mode 100644
index 0000000..ba7559c
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/mrf_sip_profile.xml
@@ -0,0 +1,65 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/mysql-server.key b/packer/jambonz-mini/proxmox/files/mysql-server.key
new file mode 100644
index 0000000..4686449
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/mysql-server.key
@@ -0,0 +1,432 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQGiBD4+owwRBAC14GIfUfCyEDSIePvEW3SAFUdJBtoQHH/nJKZyQT7h9bPlUWC3
+RODjQReyCITRrdwyrKUGku2FmeVGwn2u2WmDMNABLnpprWPkBdCk96+OmSLN9brZ
+fw2vOUgCmYv2hW0hyDHuvYlQA/BThQoADgj8AW6/0Lo7V1W9/8VuHP0gQwCgvzV3
+BqOxRznNCRCRxAuAuVztHRcEAJooQK1+iSiunZMYD1WufeXfshc57S/+yeJkegNW
+hxwR9pRWVArNYJdDRT+rf2RUe3vpquKNQU/hnEIUHJRQqYHo8gTxvxXNQc7fJYLV
+K2HtkrPbP72vwsEKMYhhr0eKCbtLGfls9krjJ6sBgACyP/Vb7hiPwxh6rDZ7ITnE
+kYpXBACmWpP8NJTkamEnPCia2ZoOHODANwpUkP43I7jsDmgtobZX9qnrAXw+uNDI
+QJEXM6FSbi0LLtZciNlYsafwAPEOMDKpMqAK6IyisNtPvaLd8lH0bPAnWqcyefep
+rv0sxxqUEMcM3o7wwgfN83POkDasDbs3pjwPhxvhz6//62zQJ7Q2TXlTUUwgUmVs
+ZWFzZSBFbmdpbmVlcmluZyA8bXlzcWwtYnVpbGRAb3NzLm9yYWNsZS5jb20+iGwE
+ExECACwCGyMCHgECF4ACGQEGCwkIBwMCBhUKCQgCAwUWAgMBAAUCXEBY+wUJI87e
+5AAKCRCMcY07UHLh9RZPAJ9uvm0zlzfCN+DHxHVaoFLFjdVYTQCfborsC9tmEZYa
+whhogjeBkZkorbyIaQQTEQIAKQIbIwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAhkB
+BQJTAdRmBQkaZsvLAAoJEIxxjTtQcuH1X4MAoKNLWAbCBUj96637kv6Xa/fJuX5m
+AJwPtmgDfjUe2iuhXdTrFEPT19SB6ohmBBMRAgAmAhsjBgsJCAcDAgQVAggDBBYC
+AwECHgECF4AFAk53PioFCRP7AhUACgkQjHGNO1By4fUmzACeJdfqgc9gWTUhgmcM
+AOmG4RjwuxcAoKfM+U8yMOGELi+TRif7MtKEms6piGkEExECACkCGyMGCwkIBwMC
+BBUCCAMEFgIDAQIeAQIXgAIZAQUCUZSROgUJFTchqgAKCRCMcY07UHLh9YtAAJ9X
+rA/ymlmozPZn+A9ls8/uwMcTsQCfaQMNq1dNkhH2kyByc3Rx9/W2xfqJARwEEAEC
+AAYFAlAS6+UACgkQ8aIC+GoXHivrWwf/dtLk/x+NC2VMDlg+vOeM0qgG1IlhXZfi
+NsEisvvGaz4m8fSFRGe+1bvvfDoKRhxiGXU48RusjixzvBb6KTMuY6JpOVfz9Dj3
+H9spYriHa+i6rYySXZIpOhfLiMnTy7NH2OvYCyNzSS/ciIUACIfH/2NH8zNT5CNF
+1uPNRs7HsHzzz7pOlTjtTWiF4cq/Ij6Z6CNrmdj+SiMvjYN9u6sdEKGtoNtpycgD
+5HGKR+I7Nd/7v56yhaUe4FpuvsNXig86K9tI6MUFS8CUyy7Hj3kVBZOUWVBM053k
+nGdALSygQr50DA3jMGKVl4ZnHje2RVWRmFTr5YWoRTMxUSQPMLpBNIkBHAQQAQIA
+BgUCU1B+vQAKCRAohbcD0zcc8dWwCACWXXWDXIcAWRUw+j3ph8dr9u3SItljn3wB
+c7clpclKWPuLvTz7lGgzlVB0s8hH4xgkSA+zLzl6u56mpUzskFl7f1I3Ac9GGpM4
+0M5vmmR9hwlD1HdZtGfbD+wkjlqgitNLoRcGdRf/+U7x09GhSS7Bf339sunIX6sM
+gXSC4L32D3zDjF5icGdb0kj+3lCrRmp853dGyA3ff9yUiBkxcKNawpi7Vz3D2ddU
+pOF3BP+8NKPg4P2+srKgkFbd4HidcISQCt3rY4vaTkEkLKg0nNA6U4r0YgOa7wIT
+SsxFlntMMzaRg53QtK0+YkH0KuZR3GY8B7pi+tlgycyVR7mIFo7riQEcBBABCAAG
+BQJWgVd0AAoJEEZu4b/gk4UKk9MH/Rnt7EccPjSJC5CrB2AU5LY2Dsr+PePI2ubP
+WsEdG82qSjjGpbhIH8LSg/PzQoGHiFWMmmZWJktRT+dcgLbs3b2VwCNAwCE8jOHd
+UkQhEowgomdNvHiBHKHjP4/lF68KOPiO/2mxYYkmpM7BWf3kB57DJ5CTi3/JLoN7
+zF40qIs/p09ePvnwStpglbbtUn7XPO+1/Ee8VHzimABom52PkQIuxNiVUzLVn3bS
+Wqrd5ecuqLk6yzjPXd2XhDHWC9Twpl68GePru6EzQtusi0m6S/sHgEXqh/IxrFZV
+JlljF75JvosZq5zeulr0i6kOij+Y1p6MFffihITZ1gTmk+CLvK2JASIEEAECAAwF
+Ak53QS4FAwASdQAACgkQlxC4m8pXrXwJ8Qf/be/UO9mqfoc2sMyhwMpN4/fdBWwf
+LkA12FXQDOQMvwH9HsmEjnfUgYKXschZRi+DuHXe1P7l8G2aQLubhBsQf9ejKvRF
+TzuWMQkdIq+6Koulxv6ofkCcv3d1xtO2W7nb5yxcpVBPrRfGFGebJvZa58DymCNg
+yGtAU6AOz4veavNmI2+GIDQsY66+tYDvZ+CxwzdYu+HDV9HmrJfc6deM0mnBn7SR
+jqzxJPgoTQhihTav6q/R5/2p5NvQ/H84OgS6GjosfGc2duUDzCP/kheMRKfzuyKC
+OHQPtJuIj8++gfpHtEU7IDUX1So3c9n0PdpeBvclsDbpRnCNxQWU4mBot4kBIgQQ
+AQIADAUCToi2GQUDABJ1AAAKCRCXELibyletfLZAB/9oRqx+NC98UQD/wlxCRytz
+vi/MuPnbgQUPLHEap10tvEi33S/H/xDR/tcGofY4cjAvo5skZXXeWq93Av7PACUb
+zkg0X0eSr2oL6wy66xfov72AwSuX+iUK68qtKaLqRLitM02y8aNRV/ggKvt7UMvG
+mOvs5yLaYlobyvGaFC2ClfkNOt2MlVnQZCmnYBCwOktPGkExiu2yZMifcYGxQcpH
+KVFG59KeF2cM2d4xYM8HJqkSGGW306LFVSyeRwG+wbttgLpD5bM/T2b3fF/J35ra
+CSMLZearRTq8aygPl+XM7MM2eR946aw6jmOsgNBErbvvIdQj6LudAZj+8imcXV2K
+iQEiBBABAgAMBQJOmdnRBQMAEnUAAAoJEJcQuJvKV618AvIIAIEF1ZJ+Ry7WOdKF
+5oeQ/ynaYUigzN92fW/9zB8yuQlngkFJGidYMbci1tR1siziIVJFusR3ZonqAPGK
+/SUta9Y6KWLhmc7c5UnEHklq/NfdMZ2WVSIykXlctqw0sbb+z1ecEd4G8u9j5ill
+MO1B36rQayYAPoeXLX8dY4VyFLVGaQ00rWQBYFZrpw16ATWbWGJP332NSfCk4zZq
+6kXEW07q0st3YBgAAGdNQyEeZCa4d4pBRSX6189Kjg6GDnIcaiOF6HO6PLr9fRlL
+r5ObCgU+G9gEhfiVwDEV9E+7/Bq2pYZ9whhkBqWQzdpXTNTM24uaEhE01EPO5zeC
+O214q6mJASIEEAECAAwFAk6rpgEFAwASdQAACgkQlxC4m8pXrXzAhwf/f9O99z16
+3Y5FZVIxexyqXQ/Mct9uKHuXEVnRFYbA49dQLD4S73N+zN7gn9jFeQcBo4w8qVUV
+94U/ta/VbLkdtNREyplPM4XY8YE5Wfd9bfyg3q1PbEiVjk995sBF+2+To99YYKst
+gXPqjlH0jUfEyDmexOj+hsp8Rc63kvkIx36VBa4ONRYFefGAhKDMigL2YAhc1UkG
+tkGTuLmlCGwIV6lviDZD3RJf5375VFnaHv7eXfwQxCwE+BxG3CURrjfxjaxMTmMP
+yAG2rhDp5oTUEvqDYNbko5UxYOmrSjvF4FzXwqerElXJUkUzSh0pp7RxHB/1lCxD
+s7D1F1hlgFQuNIkBIgQQAQIADAUCTrzZHAUDABJ1AAAKCRCXELibyletfMUpB/4s
+07dREULIBnA1D6qr3fHsQJNZqbAuyDlvgGGLWzoyEDs+1JMFFlaa+EeLIo1386GU
+2DammDC23p3IB79uQhJeD2Z1TcVg4cA64SfF/CHca5coeRSrdAiudzU/cgLGtXIP
+/OaFamXgdMxAhloLFbSHPCZkyb00phVa8+xeIVDrK1HByZsNIXy/SSK8U26S2PVZ
+2o14fWvKbJ1Aga8N6DuWY/D8P2mi3RAbiuZgfzkmKL5idH/wSKfnFKdTgJzssdCc
+1jZEGVk5rFYcWOrJARHeP/tsnb/UxKBEsNtO7e3N2e/rLVnEykVIO066hz7xZK/V
+NBSpx3k3qj4XPK41IHy2iQEiBBABAgAMBQJOzqO8BQMAEnUAAAoJEJcQuJvKV618
+2twH/0IzjXLxN45nvIfEjC75a+i9ZSLlqR8lsHL4GpEScFKI0a0lT4IVAIY2RKG+
+MAs2eHm0UfKuwGs5jluRZ9RqKrc61sY0XQV9/7znY9Db16ghX04JjknOKs/fPi87
+rvKkB/QxJWS8qbb/erRmW+cPNjbRxTFPS5JIwFWHA16ieFEpvdAgKV6nfvJVTq1r
+jPDcnIA9CJN2SmUFx9Qx3SRc6ITbam1hjFnY6sCh6AUhxLI2f1mq1xH9PqEy42Um
+68prRqTyJ7Iox1g/UDDkeeUcAg7T1viTz7uXpS3Wrq4zzo4yOpaJfLDR3pI5g2Zk
+SNGTMo6aySE4OABt8i1Pc1Pm6AmJASIEEAECAAwFAk7yPFYFAwASdQAACgkQlxC4
+m8pXrXzXiAf9FrXe0lgcPM+tYOWMLhv5gXJi2VUBaLxpyRXm/kJcmxInKq1GCd3y
+D4/FLHNu3ZcCz/uklPAbZXWI0O6ewq0LWsRtklmJjWiedH+hGyaTv95VklojRIBd
+8nBaJ6M98rljMBHTFwWvjQFVf4FLRJQZqHlvjcCkq2Dd9BWJpGXvr/gpKkmMJYNK
+/ftfZRcChb35NI19WRpOhj9u808OPcqKVvZBcPwFGV5cEBzmAC94J7JcD8+S8Ik8
+iUJMQGGL3QcmZOBozovh86hj7KTSEBHlLXl832z89H1hLeuLbnXoGLv3zeUFSxkv
+1h35LhZLqIMDQRXLuUzxGHMBpLhPyGWRJ4kBIgQQAQIADAUCTwQJFwUDABJ1AAAK
+CRCXELibyletfABvB/9Cy69cjOqLGywITs3Cpg//40jmdhSAVxilJivP6J5bubFH
+DJlVTx541Dv5h4hTG2BQuueQ4q1VCpSGW+rHcdhPyvmZGRz1rxdQQGh1Dv0Bod2c
+3PJVSYPSrRSwCZJkJHOtVRBdjK4mkZb5aFTza+Tor9kxzj4FcXVd4KAS+hHQHYHc
+Ar8tt2eOLzqdEFTULeGiSoNn+PVzvzdfhndphK+8F2jfQ2UKuc01O7k0Yn9xZVx0
+OG6fE1gStzLv7C5amWLRd8+xh+MN0G8MgNglpBoExsEMMlPBYSUHa6lxpdMNMuib
+rIyVncE9X8QOhImt8K0sNn/EdbuldJNGYbDLt7O4iQEiBBABAgAMBQJPFdTcBQMA
+EnUAAAoJEJcQuJvKV6184owH+wZ/uLpezXnSxigeH1sig72QEXMrNd5DVHCJdig3
+bo+K5YmmN710/m5z+63XKUEWpd6/knajObgckThzWftNeK1SSFQGPmoYZP9EZnSU
+7L+/dSUpExbj842G5LYagrCyMGtlxRywWEmbi72TKS/JOK0jLiOdvVy+PHrZSu0D
+TVQ7cJh1BmPsbz7zzxjmcI5l+7B7K7RHZHq45nDLoIabwDacj7BXvBK0Ajqz4QyJ
+GQUjXC7q+88I+ptPvOXlE5nI/NbiCJOMI6d/bWN1KwYrC80fZuFaznfQFcPyUaDw
+yRaun+K3kEji2wXecq+yMmLUEp01TKsUeOL50HD6hHH07W+JASIEEAECAAwFAk85
+bQsFAwASdQAACgkQlxC4m8pXrXwKPQgAlkbUsTr7nkq+haOk0jKpaHWEbRMEGMrB
+I3F7E+RDO6V/8y4Jtn04EYDc8GgZMBah+mOgeINq3y8jRMYV5jVtZXv2MWYFUcjM
+kVBKeqhi/pGEjmUdmdt3DlPv3Z+fMTMRmAocI981iY/go8PVPg/+nrR6cFK2xxnO
+R8TacikJBFeSfkkORg1tDzjjYv1B5ZIEkpplepl5ahJBBq7cpYhTdY6Yk0Sz0J8w
+EdffLSaNxrRuWLrRhWzZU7p9bFzfb/7OHc21dJnB7wKv5VvtgE+jiQw9tOKaf5hc
+SgRYuF6heu+B25gc5Uu88lo409mZ7oxQ6hDCn7JHvzh0rhmSN+Kid4kBIgQQAQIA
+DAUCT0qQrQUDABJ1AAAKCRCXELibyletfC9UB/4o2ggJYM0CLxEpP0GU8UKOh3+/
+zm1DN7Qe4kY2iCtF1plKHQaTgt5FlgRCFaiXcVv7WzGz/FnmxonR1leLl+kfRlwy
+PPnoI/AWPCy/NO4Cl5KnjsSmsdDUpObwZ4KYsdilZR7ViJu2swdAIgnXBUwrlRJR
+7CK4TAKrTeonRgVSrVx8Vt//8/cYj73CLq8oY/KK0iHiQrSwo44uyhdiFIAssjyX
+n6/2E+w0zgvPexNSNNROHQ8pjbq+NTY6GwKIGsaej3UTRwQ7psvKXz8y7xdzmOAr
+/khGvxB5gjkx02pimjeia8v66aH6rbnojJMAovNUS4EHdHnulv4rovC8Kf9iiQEi
+BBABAgAMBQJPVdsaBQMAEnUAAAoJEJcQuJvKV618vVEIALFXPBzcAO1SnQarBLzy
+YMVZZumPvSXKnUHAO+6kjApXPJ+qFRdUaSNshZxVKY9Zryblu4ol/fLUTt0CliSD
+IxD6L4GXEm4VYYCl4lPO3bVsJnGITLFwQGHM27EmjVoTiD8Ch7kPq2EXr3dMRgzj
+pdz+6aHGSUfOdLTPXufDvW83bEWGaRVuTJKw+wIrcuRqQ+ucWJgJGwcE4zeHjZad
+Jx1XUm1X+BbI73uiQussyjhhQVVNU7QEdrjyuscaZ/H38wjUwNbylxDPB4I8quC1
+knQ0wSHr7gKpM+E9nhiS14poRqU18u78/sJ2MUPXnQA6533IC238/LP8JgqB+BiQ
+BTSJASIEEAECAAwFAk9ng3cFAwASdQAACgkQlxC4m8pXrXxQRAf/UZlkkpFJj1om
+9hIRz7gS+l7YvTaKSzpo+TBcx3C7aqKJpir6TlMK9cb9HGTHo2Xp1N3FtQL72NvO
+6CcJpBURbvSyb4i0hrm/YcbUC4Y3eajWhkRS3iVfGNFbc/rHthViz0r6Y5lhXX16
+aVkDv5CIFWaF3BiUK0FnHrZiy4FPacUXCwEjv3uf8MpxV5oEmo8Vs1h4TL3obyUz
+qrImFrEMYE/12lkE8iR5KWCaF8eFyl56HL3PPl90JMQBXzhwsFoWCPuwjfM5w6sW
+Ll//zynwxtlJ9CRz9c2vK6aJ8DRu3OfBKN1iiEcNEynksDnNXErn5xXKz3p5pYdq
+e9BLzUQCDYkBIgQQAQIADAUCT3inRgUDABJ1AAAKCRCXELibyletfGMKCADJ97qk
+geBntQ+tZtKSFyXznAugYQmbzJld8U6eGSQnQkM40Vd62UZLdA8MjlWKS8y4A4L2
+0cI14zs5tKG9Q72BxQOw5xkxlLASw1/8WeYEbw7ZA+sPG//q9v3kIkru3sv64mMA
+enZtxsykexRGyCumxLjzlAcL1drWJGUYE2Kl6uzQS7jb+3PNBloQvz6nb3YRZ+Cg
+Ly9D41SIK+fpnV8r4iqhu7r4LmAQ7Q1DF9aoGaYvn2+xLGyWHxJAUet4xkMNOLp6
+k9RF1nbNe4I/sqeCB25CZhCTEvHdjSGTD2yJR5jfoWkwO9w8DZG1Q9WrWqki4hSB
+l0cmcvO34pC1SJYziQEiBBABAgAMBQJPinQFBQMAEnUAAAoJEJcQuJvKV618CFEI
+AJp5BbcV7+JBMRSvkoUcAWDoJSP2ug9zGw5FB8J90PDefKWCKs5Tjayf2TvM5ntq
+5DE9SGaXbloIwa74FoZlgqlhMZ4AtY9Br+oyPJ5S844wpAmWMFc6NnEPFaHQkQ+b
+dJYpRVNd9lzagJP261P3S+S9T2UeHVdOJBgWIq9Mbs4lnZzWsnZfQ4Lsz0aPqe48
+tkU8hw+nflby994qIwNOlk/u+I/lJbNz5zDY91oscXTRl2jV1qBgKYwwCXxyB3j9
+fyVpRl+7QnqbTWcCICVFL+uuYpP0HjdoKNqhzEguAUQQLOB9msPTXfa2hG+32ZYg
+5pzI5V7GCHq0KO6u5Ctj3TGJASIEEAECAAwFAk+cQEEFAwASdQAACgkQlxC4m8pX
+rXzi7AgAx8wJzNdD7UlgdKmrAK//YqH7arSssb33Xf45sVHDpUVA454DXeBrZpi+
+zEuo03o5BhAuf38cwfbkV6jN1mC2N0FZfpy4v7RxHKLYr7tr6r+DRn1L1giX5ybx
+CgY0fLAxkwscWUKGKABWxkz9b/beEXaO2rMt+7DBUdpAOP5FNRQ8WLRWBcMGQiaT
+S4YcNDAiNkrSP8CMLQP+04hQjahxwCgBnksylciqz3Y5/MreybNnTOrdjVDsF0Oe
+t0uLOiWXUZV1FfaGIdb/oBQLg+e1B74p5+q3aF8YI97qAZpPa1qiQzWIDX8LX9QX
+EFyZ3mvqzGrxkFoocXleNPgWT8fRuokBIgQQAQIADAUCT64N/QUDABJ1AAAKCRCX
+ELibyletfDOGCACKfcjQlSxrWlEUrYYZpoBP7DE+YdlIGumt5l6vBmxmt/5OEhqr
++dWwuoiyC5tm9CvJbuZup8anWfFzTTJmPRPsmE4z7Ek+3CNMVM2wIynsLOt1pRFK
+4/5RNjRLbwI6EtoCQfpLcZJ//SB56sK4DoFKH28Ok4cplESPnoMqA3QafdSEA/FL
+qvZV/iPgtTz7vjQkMgrXAIUM4fvKe3iXkAExGXtmgdXHVFoKmHrxJ2DTSvM7/19z
+jGJeu2MhIKHyqEmCk6hLjxyCE5pAH59KlbAQOP1bS28xlRskBApm2wN+LOZWzC62
+HhEReQ50inCGuuubK0PqUQnyYc+lUFxrFpcliQEiBBABAgAMBQJPv9lVBQMAEnUA
+AAoJEJcQuJvKV618AzgH/iRFFCi4qjvoqji1fi7yNPZVOMMO2H13Ks+AfcjRtHuV
+aa30u50ND7TH+XQe6yerTapLh3aAm/sNP99aTxIuwRSlyKEoDs93+XVSgRqPBgbF
+/vxv0ykok3p6L9DxFO/w5cL8JrBhMZoJrEkIBFkwN8tWlcXPRFQvcdBYv3M3DTZU
+qY+UHnOxHvSzsl+LJ0S9Xcd9C5bvYfabmYJvG5eRS3pj1L/y3a6yw6hvY+JtnQAk
+t05TdeHMIgQH/zb8V9wxDzmE0un8LyoC2Jx5TpikQsJSejwK6b3coxVBlngku6+C
+qDAimObZLw6H9xYYIK0FoJs7j5bQZEwUO7OLBgjcMOqJASIEEAECAAwFAk/Rpc8F
+AwASdQAACgkQlxC4m8pXrXw49Qf/TdNbun2htQ+cRWarszOx8BLEiW/x6PVyUQpZ
+nV/0qvhKzlJUjM9hQPcA0AsOjhqtCN6Cy8KXbK/TvPm9D/Nk6HWwD1PomzrJVFk2
+ywGFIuTR+lluKSp7mzm5ym0wJs5cPq731Im31RUQU8ndjLrq9YOf5FVL8NqmcOAU
+4E8d68BbmVCQC5MMr0901FKwKznShfpy7VYN25/BASj8dhnynBYQErqToOJB6Cnd
+JhdTlbfR4SirqAYZZg3XeqGhByytEHE1x7FMWWFYhdNtsnAVhYBbWqAzBs8lF9Jd
+Mhaf0VQU/4z10gVrRtXLR/ixrCi+P4cM/fOQkqd6pwqWkaXt6okBIgQQAQIADAUC
+T+NxIAUDABJ1AAAKCRCXELibyletfFBBCAC6+0TUJDcNaqOxOG1KViY6KYg9NCL8
+pwNK+RKNK/N1V+WGJQH7qDMwRoOn3yogrHax4xIeOWiILrvHK0O6drS1DjsymIhR
+Sm2XbE/8pYmEbuJ9vHh3b/FTChmSAO7dDjSKdWD3dvaY8lSsuDDqPdTX8FzOfrXC
+M22C/YPg7oUG2A5svE1b+yismP4KmVNWAepEuPZcnEMPFgop3haHg9X2+mj/btDB
+Yr6p9kAgIY17nigtNTNjtI0dMLu43aIzedCYHqOlNHiB049jkJs54fMGBjF9qPtc
+m0k44xyKd1/JXWMdNUmtwKsChAXJS3YOciMgIx6tqYUTndrP4I6q1rfriQEiBBAB
+AgAMBQJP9T1VBQMAEnUAAAoJEJcQuJvKV618J9wIAI1lId9SMbEHF6PKXRe154lE
+pap5imMU/lGTj+9ZcXmlf8o2PoMMmb3/E1k+EZUaeSBoOmjS8C2gwd5XFwRrlwAD
+RlK/pG5XsL4h5wmN2fj1ororrJXvqH427PLRQK9yzdwG4+9HTBOxjoS8qZT9plyK
+AJZzAydAMqyseRHgNo0vMwlgrs4ojo+GcFGQHrF3IaUjvVfUPOmIj7afopFdIZmI
+GaSF0TXBzqcZ1chFv/eTBcIuIKRvlaDee5FgV7+nLH2nKOARCLvV/+8uDi2zbr83
+Ip5x2tD3XuUZ0ZWxD0AQWcrLdmGb4lkxbGxvCtsaJHaLXWQ2m760RjIUcwVMEBKJ
+ASIEEAECAAwFAlAGYWsFAwASdQAACgkQlxC4m8pXrXwyVAgAvuvEl6yuGkniWOlv
+uHEusUv/+2GCBg6qV+IEpVtbTCCgiFjYR5GasSp1gpZ5r4BocOlbGdjdJGHTpyK8
+xD1i+6qZWUYhNRg2POXUVzcNEl2hhouwPLOifcmTwAKU76TEv3L5STviL3hWgUR2
+yEUZ3Ut0IGVV6uPER9jpR3qd6O3PeuFkwf+NaGTye4jioLAy3aYwtZCUXzvYmNLP
+90K4y+5yauZteLmNeq26miKC/NQu4snNFClPbGRjHD1ex9KDiAMttOgN4WEq7srT
+rYgtT531WY4deHpNgoPlHPuAfC0H+S6YWuMbgfcb6dV+Rrd8Ij6zM3B/PcjmsYUf
+OPdPtIkBIgQQAQIADAUCUBgtfQUDABJ1AAAKCRCXELibyletfAm3CACQlw21Lfeg
+d8RmIITsfnFG/sfM3MvZcjVfEAtsY3fTK9NiyU0B3yX0PU3ei37qEW+50BzqiStf
+5VhNvLfbZR+yPou7o2MAP31mq3Uc6grpTV64BRIkCmRWg40WMjNI1hv7AN/0atgj
+ATYQXgnEw7mfFb0XZtMTD6cmrz/A9nTPVgZDxzopOMgCCC1ZK4Vpq9FKdCYUaHpX
+3sqnDf+gpVIHkTCMgWLYQOeX5Nl+fgnq6JppaQ3ySZRUDr+uFUs0uvDRvI/cn+ur
+ri92wdDnczjFumKvz/cLJAg5TG2Jv1Jx3wecALsVqQ3gL7f7vr1OMaqhI5FEBqdN
+29L9cZe/ZmkriQEiBBIBCgAMBQJVoNxyBYMHhh+AAAoJEEoz7NUmyPxLD1EH/2eh
+7a4+8A1lPLy2L9xcNt2bifLfFP2pEjcG6ulBoMKpHvuTCgtX6ZPdHpM7uUOje/F1
+CCN0IPB533U1NIoWIKndwNUJjughtoRM+caMUdYyc4kQm29Se6hMPDfyswXE5Bwe
+PmoOm4xWPVOH/cVN04zyLuxdlQZNQF/nJg6PMsz4w5z+K6NGGm24NEPcc72iv+6R
+Uc/ry/7v5cVu4hO5+r104mmNV5yLecQF13cHy2JlngIHXPSlxTZbeJX7qqxE7TQh
+5nviSPgdk89oB5jFSx4g1efXiwtLlP7lbDlxHduomyQuH9yqmPZMbkJt9uZDc8Zz
+MYsDDwlc7BIe5bGKfjqJAhwEEAECAAYFAlSanFIACgkQdzHqU52lcqLdvg//cAEP
+qdN5VTKWEoDFjDS4I6t8+0KzdDWDacVFwKJ8RAo1M2SklDxnIvnzysZd2VHp5Pq7
+i4LYCZo5lDkertQ6LwaQxc4X6myKY4LTA652ObFqsSfgh9kW+aJBBAyeahPQ8CDD
++Yl23+MY5wTsj4qt7KffNzy78vLbYnVnvRQ3/CboVix0SRzg0I3Oi7n3B0lihvXy
+5goy9ikjzZevejMEfjfeRCgoryy9j5RvHH9PF3fJVtUtHCS4f+kxLmbQJ1XqNDVD
+hlFzjz8oUzz/8YXy3im5MY7Zuq4P4wWiI7rkIFMjTYSpz/evxkVlkR74qOngT2pY
+VHLyJkqwh56i0aXcjMZiuu2cymUt2LB9IsaMyWBNJjXr2doRGMAfjuR5ZaittmML
+yZwix9mWVk7tkwlIxmT/IW6Np0qMhDZcWYqPRpf7+MqY3ZYMK4552b8aDMjhXrnO
+OwLsz+UI4bZa1r9dguIWIt2C2b5C1RQ9AsQBPwg7h5P+HhRuFAuDKK+vgV8FRuzR
+JeKkFqwB4y0Nv7BzKbFKmP+V+/krRv+/Dyz9Bz/jyAQgw02u1tPupH9BGhlRyluN
+yCJFTSNj7G+OLU0/l4XNph5OOC7sy+AMZcsL/gsT/TXCizRcCuApNTPDaenACpbv
+g8OoIzmNWhh4LXbAUHCKmY//hEw9PvTZA1xKHgyJAhwEEgECAAYFAlJYsKQACgkQ
+oirk60MpxUV2XQ//b2/uvThkkbeOegusDC4AZfjnL/V3mgk4iYy4AC9hum0R9oNl
+XDR51P1TEw9mC1btHj+7m7Iq1a5ke5wIC7ENZiilr0yPqeWgL5+LC98dz/L85hqA
+wIoGeOfMhrlaVbAZEj4yQTAJDA35vZHVsQmp87il0m+fZX04OBLXBzw86EoAAZ7Q
+EoH4qFcT9k1T363tvNnIm3mEvkQ5WjE1R9uchJa1g7hdlNQlVkjFmPZrJK9fl4z5
+6Dto89Po4Sge48jDH0pias4HATYHsxW819nz5jZzGcxLnFRRR5iITVZi9qzsHP7N
+bUh3qxuWCHS9xziXpOcSZY848xXw63Y5jDJfpzupzu/KHj6CzXYJUEEqp9MluoGb
+/BCCEPzdZ0ovyxFutM/BRcc6DvE6sTDF/UES21ROqfuwtJ6qJYWX+lBIgyCJvj4o
+RdbzxUleePuzqCzmwrIXtoOKW0Rlj4SCeF9yCwUMBTGW5/nCLmN4dwf1KW2RP2Eg
+4ERbuUy7QnwRP5UCl+0ISZJyYUISfg8fmPIdQsetUK9Cj+Q5jpB2GXwELXWnIK6h
+K/6jXp+EGEXSqdIE53vAFe7LwfHiP/D5M71D2h62sdIOmUm3lm7xMOnM5tKlBiV+
+4jJSUmriCT62zo710+6iLGqmUUYlEll6Ppvo8yuanXkYRCFJpSSP7VP0bBqIZgQT
+EQIAJgUCTnc9dgIbIwUJEPPzpwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEIxx
+jTtQcuH1Ut4AoIKjhdf70899d+7JFq3LD7zeeyI0AJ9Z+YyE1HZSnzYi73brScil
+bIV6sbQ7TXlTUUwgUGFja2FnZSBzaWduaW5nIGtleSAod3d3Lm15c3FsLmNvbSkg
+PGJ1aWxkQG15c3FsLmNvbT6IbwQwEQIALwUCTnc9rSgdIGJ1aWxkQG15c3FsLmNv
+bSB3aWxsIHN0b3Agd29ya2luZyBzb29uAAoJEIxxjTtQcuH1tT0An3EMrSjEkUv2
+9OX05JkLiVfQr0DPAJwKtL1ycnLPv15pGMvSzav8JyWN3IhlBBMRAgAdBQJHrJS0
+BQkNMFioBQsHCgMEAxUDAgMWAgECF4AAEgkQjHGNO1By4fUHZUdQRwABAa6SAJ9/
+PgZQSPNeQ6LvVVzCALEBJOBt7QCffgs+vWP18JutdZc7XiawgAN9vmmITAQTEQIA
+DAUCPj6j0QWDCWYAuwAKCRBJUOEqsnKR8iThAJ9ZsR4o37dNGyl77nEqP6RAlJqa
+YgCeNTPTEVY+VXHR/yjfyo0bVurRxT2ITAQTEQIADAUCPkKCAwWDCWIiiQAKCRC2
+9c1NxrokP5aRAKCIaaegaMyiPKenmmm8xeTJSR+fKQCgrv0TqHyvCRINmi6LPucx
+GKwfy7KIRgQQEQIABgUCP6zjrwAKCRCvxSNIeIN0D/aWAKDbUiEgwwAFNh2n8gGJ
+Sw/8lAuISgCdHMzLAS26NDP8T2iejsfUOR5sNriIRgQQEQIABgUCP7RDdwAKCRCF
+lq+rMHNOZsbDAJ0WoPV+tWILtZG3wYqg5LuHM03faQCeKuVvCmdPtro06xDzeeTX
+VrZ14+GIRgQQEQIABgUCQ1uz6gAKCRCL2C5vMLlLXH90AJ0QsqhdAqTAk3SBnO2w
+zuSOwiDIUwCdFExsdDtXf1cL3Q4ilo+OTdrTW2CIRgQTEQIABgUCRPEzJgAKCRD2
+ScT0YJNTDApxAKCJtqT9LCHFYfWKNGGBgKjka0zi9wCcCG3MvnvBzDUqDVebudUZ
+61Sont+ITAQQEQIADAUCQYHLAQWDBiLZiwAKCRAYWdAfZ3uh7EKNAJwPywk0Nz+Z
+Lybw4YNQ7H1UxZycaQCePVhY4P5CHGjeYj9SX2gQCE2SNx+ITAQQEQIADAUCQYHL
+NAWDBiLZWAAKCRCBwvfr4hO2kiIjAJ0VU1VQHzF7yYVeg+bh31nng9OOkwCeJI8D
+9mx8neg4wspqvgXRA8+t2saITAQQEQIADAUCQYHLYgWDBiLZKgAKCRBrcOzZXcP0
+cwmqAJsFjOvkY9c5eA/zyMrOZ1uPB6pd4QCdGyzgbYb/eoPu6FMvVI9PVIeNZReI
+TAQQEQIADAUCQdCTJAWDBdQRaAAKCRB9JcoKwSmnwmJVAKCG9a+Q+qjCzDzDtZKx
+5NzDW1+W+QCeL68seX8OoiXLQuRlifmPMrV2m9+ITAQQEQIADAUCQitbugWDBXlI
+0gAKCRDmG6SJFeu5q/MTAKCTMvlCQtLKlzD0sYdwVLHXJrRUvgCffmdeS6aDpwIn
+U0/yvYjg1xlYiuqITAQSEQIADAUCQCpZOgWDB3pLUgAKCRA8oR80lPr4YSZcAJwP
+4DncDk4YzvDvnRbXW6SriJn1yQCdEy+d0CqfdhM7HGUs+PZQ9mJKBKqITAQSEQIA
+DAUCQD36ugWDB2ap0gAKCRDy11xj45xlnLLfAKC0NzCVqrbTDRw25cUss14RRoUV
+PACeLpEc3zSahJUB0NNGTNlpwlTczlCITAQSEQIADAUCQQ4KhAWDBpaaCAAKCRA5
+yiv0PWqKX/zdAJ4hNn3AijtcAyMLrLhlZQvib551mwCgw6FEhGLjZ+as0W681luc
+wZ6PzW+ITAQSEQIADAUCQoClNAWDBSP/WAAKCRAEDcCFfIOfqOMkAJwPUDhS1eTz
+gnXclDKgf353LbjvXgCeLCWyyj/2d0gIk6SqzaPl2UcWrqiITAQTEQIADAUCPk1N
+hAWDCVdXCAAKCRAtu3a/rdTJMwUMAKCVPkbk1Up/kyPrlsVKU/Nv3bOTZACfW5za
+HX38jDCuxsjIr/084n4kw/uITAQTEQIADAUCQdeAdgWDBc0kFgAKCRBm79vIzYL9
+Pj+8AJ9d7rvGJIcHzTCSYVnaStv6jP+AEACeNHa5yltqieRBCCcLcacGqYK81omI
+TAQTEQIADAUCQhiBDgWDBYwjfgAKCRB2wQMcojFuoaDuAJ9CLYdysef7IsW42UfW
+hI6HjxkzSgCfeEpXS4hEmmGicdpRiJQ/W21aB0GIZQQTEQIAHQULBwoDBAMVAwID
+FgIBAheABQJLcC/KBQkQ8/OnABIHZUdQRwABAQkQjHGNO1By4fWw2wCeJilgEarL
+8eEyfDdYTyRdqE45HkoAnjFSZY8Zg/iXeErHI0r04BRukNVgiHsEMBECADsFAkJ3
+NfU0HQBPb3BzLi4uIHNob3VsZCBoYXZlIGJlZW4gbG9jYWwhIEknbSAqc28qIHN0
+dXBpZC4uLgAKCRA5yiv0PWqKX+9HAJ0WjTx/rqgouK4QCrOV/2IOU+jMQQCfYSC8
+JgsIIeN8aiyuStTdYrk0VWCIjwQwEQIATwUCRW8Av0gdAFNob3VsZCBoYXZlIGJl
+ZW4gYSBsb2NhbCBzaWduYXR1cmUsIG9yIHNvbWV0aGluZyAtIFdURiB3YXMgSSB0
+aGlua2luZz8ACgkQOcor9D1qil+g+wCfcFWoo5qUl4XTE9K8tH3Q+xGWeYYAnjii
+KxjtOXc0ls+BlqXxbfZ9uqBsiQIiBBABAgAMBQJBgcuFBYMGItkHAAoJEKrj5s5m
+oURoqC8QAIISudocbJRhrTAROOPoMsReyp46Jdp3iL1oFDGcPfkZSBwWh8L+cJjh
+dycIwwSeZ1D2h9S5Tc4EnoE0khsS6wBpuAuih5s//coRqIIiLKEdhTmNqulkCH5m
+imCzc5zXWZDW0hpLr2InGsZMuh2QCwAkB4RTBM+r18cUXMLV4YHKyjIVaDhsiPP/
+MKUj6rJNsUDmDq1GiJdOjySjtCFjYADlQYSD7zcd1vpqQLThnZBESvEoCqumEfOP
+xemNU6xAB0CL+pUpB40pE6Un6Krr5h6yZxYZ/N5vzt0Y3B5UUMkgYDSpjbulNvaU
+TFiOxEU3gJvXc1+h0BsxM7FwBZnuMA8LEA+UdQb76YcyuFBcROhmcEUTiducLu84
+E2BZ2NSBdymRQKSinhvXsEWlH6Txm1gtJLynYsvPi4B4JxKbb+awnFPusL8W+gfz
+jbygeKdyqzYgKj3M79R3geaY7Q75Kxl1UogiOKcbI5VZvg47OQCWeeERnejqEAdx
+EQiwGA/ARhVOP/1l0LQA7jg2P1xTtrBqqC2ufDB+v+jhXaCXxstKSW1lTbv/b0d6
+454UaOUV7RisN39pE2zFvJvY7bwfiwbUJVmYLm4rWJAEOJLIDtDRtt2h8JahDObm
+3CWkpadjw57S5v1c/mn+xV9yTgVx5YUfC/788L1HNKXfeVDq8zbAiQIiBBMBAgAM
+BQJCnwocBYMFBZpwAAoJENjCCglaJFfPIT4P/25zvPp8ixqV85igs3rRqMBtBsj+
+5EoEW6DJnlGhoi26yf1nasC2frVasWG7i4JIm0U3WfLZERGDjR/nqlOCEqsP5gS3
+43N7r4UpDkBsYh0WxH/ZtST5llFK3zd7XgtxvqKL98l/OSgijH2W2SJ9DGpjtO+T
+iegq7igtJzw7Vax9z/LQH2xhRQKZR9yernwMSYaJ72i9SyWbK3k0+e95fGnlR5pF
+zlGq320rYHgD7v9yoQ2t1klsAxK6e3b7Z+RiJG6cAU8o8F0kGxjWzF4v8D1op7S+
+IoRdB0Bap01ko0KLyt3+g4/33/2UxsW50BtfqcvYNJvU4bZns1YSqAgDOOanBhg8
+Ip5XPlDxH6J/3997n5JNj/nk5ojfd8nYfe/5TjflWNiput6tZ7frEki1wl6pTNbv
+V9C1eLUJMSXfDZyHtUXmiP9DKNpsucCUeBKWRKLqnsHLkLYydsIeUJ8+ciKc+EWh
+FxEY+Ml72cXAaz5BuW9L8KHNzZZfez/ZJabiARQpFfjOwAnmhzJ9r++TEKRLEr96
+taUI9/8nVPvT6LnBpcM38Td6dJ639YvuH3ilAqmPPw50YvglIEe4BUYD5r52Seqc
+8XQowouGOuBX4vs7zgWFuYA/s9ebfGaIw+uJd/56Xl9ll6q5CghqB/yt1EceFEnF
+CAjQc2SeRo6qzx22iEYEEBECAAYFAkSAbycACgkQCywYeUxD5vWDcACfQsVk/XGi
+ITFyFVQ3IR/3Wt7zqBMAoNhso/cX8VUfs2BzxPvvGS3y+5Q9iEYEEBECAAYFAkUw
+ntcACgkQOI4l6LNBlYkyFgCbBcw5gIii0RTDJsdNiuJDcu/NPqEAniSq9iTaLjgF
+HZbaizUU8arsVCB5iEYEEBECAAYFAkWho2sACgkQu9u2hBuwKr6bjwCfa7ZK6O+X
+mT08Sysg4DEoZnK4L9UAoLWgHuYg35wbZYx+ZUTh98diGU/miF0EExECAB0FAj4+
+owwFCQlmAYAFCwcKAwQDFQMCAxYCAQIXgAAKCRCMcY07UHLh9XGOAJ4pVME15/DG
+rUDohtGv2z8a7yv4AgCeKIp0jWUWE525QocBWms7ezxd6syIXQQTEQIAHQUCR6yU
+zwUJDTBYqAULBwoDBAMVAwIDFgIBAheAAAoJEIxxjTtQcuH1dCoAoLC6RtsD9K3N
+7NOxcp3PYOzH2oqzAKCFHn0jSqxk7E8by3sh+Ay8yVv0BYhdBBMRAgAdBQsHCgME
+AxUDAgMWAgECF4AFAkequSEFCQ0ufRUACgkQjHGNO1By4fUdtwCfRNcueXikBMy7
+tE2BbfwEyTLBTFAAnifQGbkmcARVS7nqauGhe1ED/vdgiF0EExECAB0FCwcKAwQD
+FQMCAxYCAQIXgAUCS3AuZQUJEPPyWQAKCRCMcY07UHLh9aA+AKCHDkOBKBrGb8tO
+g9BIub3LFhMvHQCeIOOot1hHHUlsTIXAUrD8+ubIeZaJARwEEgECAAYFAkvCIgMA
+CgkQ3PTrHsNvDi8eQgf/dSx0R9Klozz8iK79w00NOsdoJY0Na0NTFmTbqHg30XJo
+G62cXYgc3+TJnd+pYhYi5gyBixF/L8k/kPVPzX9W0YfwChZDsfTw0iDVmGxOswiN
+jzSo0lhWq86/nEL30Khl9AhCC1XFNRw8WZYq9Z1qUXHHJ2rDARaedvpKHOjzRY0N
+dx6R2zNyHDx2mlfCQ9wDchWEuJdAv0uHrQ0HV9+xq7lW/Q3L/V5AuU0tiowyAbBL
+PPYrB6x9vt2ZcXS7BOy8SfQ1i8W2QDQ/Toork4YwBiv6WCW/ociy7paAoPOWV/Nf
+2S6hDispeecbk7wqpbUj5klDmwrlgB/jmoAXWEnbsYkBIgQQAQIADAUCSSpooAUD
+ABJ1AAAKCRCXELibyletfFOMCACpP+OVZ7lH/cNY+373c4FnSI0/S5PXS0ABgdd4
+BFWRFWKrWBeXBGc8sZfHOzVEwkzV96iyHbpddeAOAkEA4OVPW1MMFCmlHxi2s9/N
+JrSrTPVfQOH5fR9hn7Hbpq/ETw0IoX1FKo7vndMnHZnFEnI+PDXLcdMYQgljYzhT
+xER4vYY0UKu8ekSshUy4zOX7XSJxwqPUvps8qs/TvojIF+vDJvgFYHVkgvS+shp8
+Oh/exg9vKETBlgU87Jgsqn/SN2LrR/Jhl0aLd0G0iQ+/wHmVYdQUMFaCZwk/BKNa
+XPzmGZEUZ3RNbYa19Mo7hcE3js76nh5YMxFvxbTggVu4kdFkiQEiBBABAgAMBQJK
+M06IBQMAEnUAAAoJEJcQuJvKV618F4gH/innejIHffGMk8jYix4ZZT7pW6ApyoI+
+N9Iy85H4L+8rVQrtcTHyq0VkcN3wPSwtfZszUF/0qP6P8sLJNJ1BtrHxLORYjJPm
+gveeyHPzA2oJl6imqWUTiW822fyjY/azwhvZFzxmvbFJ+r5N/Z57+Ia4t9LTSqTN
+HzMUYaXKDaAqzZeK7P0E6XUaaeygbjWjBLQ1O0ezozAy+Kk/gXApmDCGFuHSFe7Z
+mgtFcbXLM2XFQpMUooETD2R8MUsd+xnQsff/k6pQOLxi+jUEsWSr/iqmvlk6gZ4D
+pemBjuhcXYlxJYjUaX9Zmn5s+ofF4GFxRqXoY7l9Z+tCM9AX37lm6S+JASIEEAEC
+AAwFAkpEcgoFAwASdQAACgkQlxC4m8pXrXz2mgf/RQkpmMM+5r8znx2TpRAGHi5w
+ktvdFxlvPaOBWE28NDwTrpcoMqo9kzAiuvEQjVNihbP21wR3kvnQ84rTAH0mlC2I
+uyybggpqwzOUl+Wi0o+vk8ZA0A0dStWRN8uqneCsd1XnqDe1rvqC4/9yY223tLmA
+kPvz54ka2vX9GdJ3kxMWewhrVQSLCktQpygU0dujGTDqJtnk0WcBhVF9T87lv3W2
+eGdPielzHU5trXezmGFj21d56G5ZFK8co7RrTt4qdznt80glh1BTGmhLlzjMPLTe
+dcMusm3D1QB9ITogcG94ghSf9tEKmmRJ6OnnWM5Kn9KcL63E5oj2/lY9H54wSYkB
+IgQQAQIADAUCSlY+RwUDABJ1AAAKCRCXELibyletfOOQB/0dyJBiBjgf+8d3yNID
+pDktLhZYw8crIjPBVdOgX12xaUYBTGcQITRVHSggzffDA5BQXeUuWhpL4QB0uz1c
+EPPwSMiWiXlBtwF5q6RVf3PZGJ9fmFuTkPRO7SruZeVDo9WP8HjbQtOLukYf566e
+grzAYR9p74UgWftpDtmrqrRTobiuvsFBxosbeRCvEQCrN0n+p5D9hCVB88tUPHnO
+WA4mlduAFZDxQWTApKQ92frHiBqy+M1JFezz2OM3fYN+Dqo/Cb7ZwOAA/2dbwS7o
+y4sXEHbfWonjskgPQwFYB23tsFUuM4uZwVEbJg+bveglDsDStbDlfgArXSL/0+ak
+lFcHiQEiBBABAgAMBQJKaAqEBQMAEnUAAAoJEJcQuJvKV618rH0H/iCciD4U6YZN
+JBj0GN7/Xt851t9FWocmcaC+qtuXnkFhplXkxZVOCU4VBMs4GBoqfIvagbBTyfV4
+Di+W8Uxr+/1jiu3l/HvoFxwdwNkGG6zNBhWSjdwQpGwPvh5ryV1OfLX/mgQgdDmx
+vqz5+kFDUj4m7uLaeuU2j1T0lR4zU0yAsbt7J3hwfqJCXHOc9bm5nvJwMrSm+sdC
+TP5HjUlwHr9mTe8xuZvj6sO/w0P4AqIMxjC9W7pT9q0ofG2KSTwt7wFbh05sbG4U
+QYOJe4+Soh3+KjAa1c0cvmIh4cKX9qfCWwhhdeNfh1A9VTHhnl5zTv/UjvnQtjhl
+H/Fq1eBSKcSJASIEEAECAAwFAkp5LgoFAwASdQAACgkQlxC4m8pXrXwY6wgAg3f8
+76L3qDZTYlFAWs3pXBl8GsUr1DEkTlEDZMZKDM3wPmhaWBR1hMA3y6p3aaCUyJIJ
+BEneXzgyU9uqCxXpC78d5qc3xs/Jd/SswzNYuvuzLYOw5wN5L31SLmQTQ8KqE0uo
+RynBmtDCQ4M2UKifSnv+0+3mPh85LVAS481GNpL+VVfCYtKesWNu40+98Yg6L9NG
+WwRTfsQbcdokZo44Jz7Y7f81ObC4r/X1DgPj2+d4AU/plzDcdrbINOyprs+7340e
+cnaGO4Lsgd19b1CvcgJgltRquu3kRvd+Ero2RYpDv6GVK8Ea0Lto4+b/Ae8cLXAh
+QnaWQCEWmw+AU4Jbz4kBIgQQAQIADAUCSo5fvQUDABJ1AAAKCRCXELibyletfA08
+B/9w8yJdc8K+k07U30wR/RUg3Yb2lBDygmy091mVsyB0RGixBDXEPOXBqGKAXiV1
+QSMAXM2VKRsuKahY2HFkPbyhZtjbdTa7Pr/bSnPvRhAh9GNWvvRg2Kp3qXDdjv9x
+ywEghKVxcEIVXtNRvpbqRoKmHzIExvUQck5DM1VwfREeYIoxgs4035WADhVMdngQ
+S2Gt8P2WaU/p8EZhFGg6X8KtOlD68zGboaJe0hj2VDc+Jc+KdjRfE3fW5IToid/o
+DkUaIW6tB3WkXb0g6D/2hrEJbX3headChHKSB8eQdOR9bcCJDhhU8csd501qmrhC
+ctmvlpeWQZdIQdk6sABPWeeCiQEiBBABAgAMBQJKoBJHBQMAEnUAAAoJEJcQuJvK
+V618Ml8H/1D88/g/p9fSVor4Wu5WlMbg8zEAik3BIxQruEFWda6nART6M9E7e+P1
+++UHZsWYs6l9ROpWxRLG1Yy9jLec2Y3nUtb20m65p+IVeKR2a9PHW35WZDV9dOYP
+GZabKkO1clLeWLVgp9LRjZ+AeRG+ljHqsULXro1dwewLTB/gg9I2vgNv6dKxyKak
+nM/GrqZLATAq2KoaE/u/6lzRFZIzZnLtjZh8X7+nS+V8v9IiY4ntrpkrbvFk30U6
+WJp79oBIWwnW/84RbxutRoEwSar/TLwVRkcZyRXeJTapbnLGnQ/lDO1o1d7+Vbjd
+q/Sg/cKHHf7NthCwkQNsCnHL0f51gZCJASIEEAECAAwFAkqoEAAFAwASdQAACgkQ
+lxC4m8pXrXwE/Af/XD4R/A5R6Ir/nCvKwCTKJmalajssuAcLEa2pMnFZYO/8rzLO
++Gp8p0qFH9C4LFwA0NvR5q6X/swuROf4zxljSvNcdlQVaAfJ2ZDEgJ5GXzsPplrv
+SAI9jS3LL7fSWDZgKuUe0a4qx7A0NgyGMUYGhP+QlRFa8vWEBI9fANd/0mMqAeBV
+qQyOH0X1FiW1Ca2Jn4NKfuMy9GEvRddVIbB1LvoNVtXPNzeeKMyNb9Jdx1MFWssy
+COBP2DayJKTmjvqPEc/YOjOowoN5sJ/jn4mVSTvvlTooLiReSs6GSCAjMVxN7eYS
+/Oyq6Iu1JDcJvmB8N2WixAZtAVgF8OA7CWXKVYkBIgQQAQIADAUCSrnHiQUDABJ1
+AAAKCRCXELibyletfPChB/9uECti1dZeNuFsd0/RuGyRUVlrrhJE6WCcOrLO9par
+rPbewbKBmjSzB0MygJXGvcC06mPNuquJ7/WpxKsFmfg4vJBPlADFKtgRUy9BLzjC
+eotWchPHFBVW9ftPbaQViSUu7d89NLjDDM5xrh80puDIApxoQLDoIrh3T1kpZx56
+jSWv0gelFUMbXAzmqkJSyL4Xdh1aqzgUbREd7Xf2ICzuh0sV6V7c/AwWtjWEGEsA
+HZaiQDywZwbC18GwrMLiAzGWb/AScFDQRCZKJDjL+Ql8YT6z+ZMVr8gb7CIU5PKY
+dhiIf2UVTQwLAoW7lNRCQQAqcGjK3IMIz7SO/yk4HmVUiQEiBBABAgAMBQJK3gjG
+BQMAEnUAAAoJEJcQuJvKV618jkEH+wb0Zv9z7xQgpLMowVuBFQVu8/z7P5ASumyB
+PUO3+0JVxSHBhlCKQK7n11m1fhuGt2fCxXhSU6LzXj36rsKRY53lGZ9QhvqFUtQH
+3Xb2IQLIJC4UKjG2jSSCdcuA/x98bwp2v7O03rn7ndCS16CwXnRV3geQoNipRKMS
+DajKPpZv1RiZm8pMKqEb8WSw352xWoOcxuffjlsOEwvJ85SEGCAZ9tmIlkZOc7Ai
+QONDvii9b8AYhQ60RIQC0HP2ASSmK0V92VeFPxHmAygdDQgZNVtbVxgnnt7oTNEu
+VRXNY+z4OfBArp7R+cTsvijDRZY4kML1n22hUybwoxUEvjqZV2+JASIEEAECAAwF
+AkrvOlQFAwASdQAACgkQlxC4m8pXrXxrPAgArXiNgZirNuBhfNCXlkzkCHLx5wnV
+e4SmTpbWzTwWw7+qk7d4l9hlWtdImISORINzo7f4ShSUzJX2GciNaXhaHRo7+y5O
+Zbu82jQb09aQQj/nibKYuqxqUrobTEm+DuYz3JUQZm2PsPcHLS8mX9cxvrJUncPG
+nXEV0DRaq71SGWDprtkvBbp6i38aY3sIhYgz8wM5m1szKDtjywmBYcFehIdozt9z
+hm7wZshzRWQX1+Rf/pIsnk+OzBIa34crSemTnacbV/B7278z2XAyziPNFuqz0xu+
+iltOmYmayfNWAmumuw9NcuwWMlth6Mc2HLrpo0ZBheJ6iuDMPsHnwqdB/4kBIgQQ
+AQIADAUCSwBd2gUDABJ1AAAKCRCXELibyletfP6tB/4m1w0BtlkJgtS6E+B/ns14
+z4A4PGors+n+MYm05qzvi+EnDF/sytCmVcKeimrtvDcfoDtKAFFvJjcYXfnJdGWm
+Pu0SJMRL5KKCirAKwZmU/saxOgoB5QLNw+DHPteJ3w9GmWlGxIqG1r15WC5duzBC
+y3FsnjJYG3jaLnHOO9yXXb5h0kUTORfUKdvAr1gxF2KoatZWqGoaPPnHoqb88rjt
+zk8I7gDqoXnzh8wLxa0ZYvfTC/McxdWTrwXLft+krmMQ18iIZEne2hvVLNJVuluU
+oiWLeHA8iNCQ4W4WTdLc1mCnCjGTMX/MN41uLH0C9Ka4R6wEaqj4lPDk1B/1TV+Q
+iQEiBBABAgAMBQJLEYGrBQMAEnUAAAoJEJcQuJvKV618naIH/2t9aH5mBTKBN6fU
+qhrf79vIsjtI/QNS5qisBISZMX3/1/0Gu6WnxkPSfdCUJMWCjMcnVj7KU2wxTHHG
+VpAStd9r2afUNxRyqZwzwyytktuZok0XngAEDYDDBS3ssu2R4uWLCsC2ysXEqO/5
+tI5YrTWJZrfeIphTaYP5hxrMujvqy3kEwKKbiMz91cDeiLS+YCBcalj5n/1dMYf7
+8U8C6ieurxAg/L8h6x25VM4Ilx4MmG2T8QGtkkUXd+Fd/KYWmf0LE5LLPknf0Hhw
+oVslPXeinp4FsHK/5wzviv4YZpzuTqs9NlKcMsa4IuuPOB0FDf0pn+OFQbEg9QwY
+2gCozK+JASIEEAECAAwFAksjTdQFAwASdQAACgkQlxC4m8pXrXwlogf/XBGbXRVX
+LMaRN4SczOjwT3/tUCriTkb3v+zKjRG90zFhYAccjn7w+7jKQicjq6quQG1EH2X4
+/Su6ps1lDLqGHHhiJW3ZhxQScLZmhdAYsh2qG4GP/UW3QjXG7c61t+H3olvWg2cr
+wqCxxFZAgkAAkr9xcHWFZJEQeXoob6cCZObaUnHSANdmC6s5lUxXYa2bmL7Q3UB4
+4KCzDvAfbPZKJOw9k0qb3lc11zx+vGdyZFbm4R0+3LPp/vT0b3GlSbbF9lU1GOXh
+VaphrgFFa76dmjfHCkPplXAkK1VSIU/aPGAefduTFMdlSZpdMtJ5AULjGcszBDlR
+pLlPxvqVa0ZpgIkBIgQQAQIADAUCSycmkgUDABJ1AAAKCRCXELibyletfHlNCACp
+1YespiHfQt2alcscE5zgfETEHHic8Ai6pNkU9HT4TeWcFHEDe5QqfYcpjLrQvBXS
+kSvxEittbyRdv+e+j5Z+HyHjiG8nAQBL6qy9eHqQE4+d7gYs6DTk7sG9ZMYphREb
+ltzD+F4hVCQdLT8LNr0eVFN7ehqECScDaCG8/Qyti+l/0M902/Yn+mz0ilOiUdWJ
+9x6LPaIINtb1gsYDEylLjwGIZmI0r5Kh9wYoV4vnNezFbxO1uRiW0B7iaPjIEsbt
+OOKp7wx2aX+DM3N9F3BtaIY8XnzcnomNm83SNsgmgrZljpQltUnNqIhNM8DupQ+I
+WOV5gtl6pTC7CgeVTVyRiQEiBBABAgAMBQJLOGXuBQMAEnUAAAoJEJcQuJvKV618
+ll4IAKJ9mm4jb0c8fe9+uDI8eCJRbzNbVXm8zWzpA8GUtQAakwxoKv332QP1Wa1P
+odni/e3EMhsSREOZJJv79YqGxGRBTE9Kb/VjM34nas4XSnXKW28XWhKyIw+XwQAi
+nY2swFHh+83Htr/mwTdJfS2aEYl2zboBvd/JZCdhOGU2GH737S/3uEczoKkfVQ/w
+OTM8X1xWwlYWqx23k/DsGcuDs9lA2g7Mx7DSqBtVjaTkn9h0zATzXLDkmP4SAUVj
+cZ83WDpFre5WnizZjdXlBMM5OCexp5WpmzyHLTnaBFK4jEmnsk5C2Rnoyp8Ivz6g
+Ecg1tRbEXijRw++d2TFYlJwLKtiJASIEEAECAAwFAktKMicFAwASdQAACgkQlxC4
+m8pXrXxqHQgAuYY5scKrh0m/GS9EYnyC9494lOlO6iytU0CpE6oBC31M3hfX/Dbj
+UbcS5szZNU+2CPYo4ujQLZ7suN7+tTjG6pZFfMevajT9+jsL+NPMF8RLdLOVYmbl
+TmSQGNO+XGEYaKYH5oZIeIW5AKCgi2ozkdFlBBLAx7Kqo/FyybhkURFEcvEyVmgf
+3KLV7IIiX/fYLfoCMCJ/Lcm9/llSFB1n8Nvg66Xd533DKoHjueD3jyaNAVlo2mq/
+sIAv++kntvOiB3GDK5pfwHZ78WWiCpsWZpE5gzAnzJ1Y0WEigRo0PVLu3cLO0jLG
+23d+H/CbfZ8rkajHJeCDQF7YVmP0t0nYpYkBIgQQAQIADAUCS1v+ZgUDABJ1AAAK
+CRCXELibyletfNS/CACqt2TkB86mjqM+cJ74+dWBvJ2aFuURuxzm95i9Q/W/hU08
+2iMbC3+0k2oD8CrTOe61P+3oRyLjv/UEDUNzLncNe2YsA9JeV+4hvPwH5Vp3Om13
+089fCKZUbqslXNKkHiWYU+zAaZJXEuGRmRz0HbQIeAMOWF4oa226uo1e4ws1Jhc+
+F3E/ApCRyFBqBUdL05hapQLditYpsBjIdiBGpjzidMLE2wX2W4ZpAdN0U6BIyIqR
+mTPjbSkvzS9kSWFmfhQgnBDKEYJpVZgE1sN52rYC1sDeGeiuKxlzjVov9MMhYMWa
+Zo3R5o3F2iIM/BK6FbC252lf/Mhu3ICuXujNBZNYiQEiBBABAgAMBQJLbSH4BQMA
+EnUAAAoJEJcQuJvKV618kd0IAJLLwDH6gvgAlBFklQJXqQxUdcSOOVMAWtlHgWOy
+ozjgomZZBkRL8dtCDr9YBMcj5czcQ3qpmLJdppXhKB+kJV2iUXfDMSFXwJ4wLfIs
+8FNnXw8H5U01oBkGH/Ku6ngL9Vwt+MjYHtCWkw9QueUKZnDudX9qIzLAIt+mwSTu
+A6+fY4VWIg40AA0v3exaQM55YR/UhlKunpGG9o8Qkq77dMEbTMpOmBoLbOMRB3Dd
+MAvVU6G2l6Pcb7KobVCuOBnb6batXARV/G8sw+nzfJ16fr/KobZT2A6m+Jrqk4dl
+F14ljLbz16O5JGUPAryN2G2ddBdSAy7dtFSVhWWiWC9n88q5Ag0EPj6jHRAIAO/h
+iX8WzHWOMLJT54x/axeDdqn1rBDf5cWmaCWHN2ujNNlgpx5emoU9v7QStsNUCOGB
+bXkeO4Ar7YG+jtSR33zqNh3y5kQ0YkY3dQ0wh6nsl+wh4XIIY/3TUZVtmdJeUBRH
+JlfVNFYad2hX1guFI37Ny1PoZAFsxO82g+XB/Se8r/+sbmVcONdcdIeFKrE3FjLt
+IjNQcxC6l9Q2Oy8KDxG/zvUZG3+H5i3tdRMyGgmuD6gEV0GXOHYUopzLeit1+Aa0
+bCk36Mwbu+BeOw/CJW3+b0mB27hOaf9aCA855IP6fJFvtxcblq8nHIqhU3Dc9tec
+sl9/S1xZ5S8ylG/xeRsAAwUH/i8KqmvAhq0X7DgCcYputwh37cuZlHOa1Ep07JRm
+BCDgkdQXkGrsj2Wzw7Aw/TGdWWkmn2pxb8BRui5cfcZFO7c6vryi6FpJuLucX975
++eVY50ndWkPXkJ1HF4i+HJwRqE2zliN/RHMs4LJcwXQvvjD43EE3AO6eiVFbD+qA
+AdxUFoOeLblKNBHPG7DPG9xL+Ni5rkE+TXShxsB7F0z7ZdJJZOG0JODmox7IstQT
+GoaU9u41oyZTIiXPiFidJoIZCh7fdurP8pn3X+R5HUNXMr7M+ba8lSNxce/F3kmH
+0L7rsKqdh9d/aVxhJINJ+inVDnrXWVoXu9GBjT8Nco1iU9SIVAQYEQIADAUCTnc9
+7QUJE/sBuAASB2VHUEcAAQEJEIxxjTtQcuH1FJsAmwWK9vmwRJ/y9gTnJ8PWf0BV
+roUTAKClYAhZuX2nUNwH4vlEJQHDqYa5yQ==
+=ghXk
+-----END PGP PUBLIC KEY BLOCK-----
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/nginx-badbots.filter b/packer/jambonz-mini/proxmox/files/nginx-badbots.filter
new file mode 100644
index 0000000..12d4105
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx-badbots.filter
@@ -0,0 +1,24 @@
+# Fail2Ban configuration file
+#
+# Regexp to catch known spambots and software alike. Please verify
+# that it is your intent to block IPs which were driven by
+# above mentioned bots.
+
+
+[Definition]
+
+badbotscustom = EmailCollector|WebEMailExtrac|TrackBack/1\.02|sogou music spider|(?:Mozilla/\d+\.\d+ )?Jorgee
+badbots = Atomic_Email_Hunter/4\.0|atSpider/1\.0|autoemailspider|bwh3_user_agent|China Local Browse 2\.6|ContactBot/0\.2|ContentSmartz|DataCha0s/2\.0|DBrowse 1\.4b|DBrowse 1\.4d|Demo Bot DOT 16b|Demo Bot Z 16b|DSurf15a 01|DSurf15a 71|DSurf15a 81|DSurf15a VA|EBrowse 1\.4b|Educate Search VxB|EmailSiphon|EmailSpider|EmailWolf 1\.00|ESurf15a 15|ExtractorPro|Franklin Locator 1\.8|FSurf15a 01|Full Web Bot 0416B|Full Web Bot 0516B|Full Web Bot 2816B|Guestbook Auto Submitter|Industry Program 1\.0\.x|ISC Systems iRc Search 2\.1|IUPUI Research Bot v 1\.9a|LARBIN-EXPERIMENTAL \(efp@gmx\.net\)|LetsCrawl\.com/1\.0 \+http\://letscrawl\.com/|Lincoln State Web Browser|LMQueueBot/0\.2|LWP\:\:Simple/5\.803|Mac Finder 1\.0\.xx|MFC Foundation Class Library 4\.0|Microsoft URL Control - 6\.00\.8xxx|Missauga Locate 1\.0\.0|Missigua Locator 1\.9|Missouri College Browse|Mizzu Labs 2\.2|Mo College 1\.9|MVAClient|Mozilla/2\.0 \(compatible; NEWT ActiveX; Win32\)|Mozilla/3\.0 \(compatible; Indy Library\)|Mozilla/3\.0 \(compatible; scan4mail \(advanced version\) http\://www\.peterspages\.net/?scan4mail\)|Mozilla/4\.0 \(compatible; Advanced Email Extractor v2\.xx\)|Mozilla/4\.0 \(compatible; Iplexx Spider/1\.0 http\://www\.iplexx\.at\)|Mozilla/4\.0 \(compatible; MSIE 5\.0; Windows NT; DigExt; DTS Agent|Mozilla/4\.0 efp@gmx\.net|Mozilla/5\.0 \(Version\: xxxx Type\:xx\)|NameOfAgent \(CMS Spider\)|NASA Search 1\.0|Nsauditor/1\.x|PBrowse 1\.4b|PEval 1\.4b|Poirot|Port Huron Labs|Production Bot 0116B|Production Bot 2016B|Production Bot DOT 3016B|Program Shareware 1\.0\.2|PSurf15a 11|PSurf15a 51|PSurf15a VA|psycheclone|RSurf15a 41|RSurf15a 51|RSurf15a 81|searchbot admin@google\.com|ShablastBot 1\.0|snap\.com beta crawler v0|Snapbot/1\.0|Snapbot/1\.0 \(Snap Shots, \+http\://www\.snap\.com\)|sogou develop spider|Sogou Orion spider/3\.0\(\+http\://www\.sogou\.com/docs/help/webmasters\.htm#07\)|sogou spider|Sogou web spider/3\.0\(\+http\://www\.sogou\.com/docs/help/webmasters\.htm#07\)|sohu agent|SSurf15a 
11 |TSurf15a 11|Under the Rainbow 2\.2|User-Agent\: Mozilla/4\.0 \(compatible; MSIE 6\.0; Windows NT 5\.1\)|VadixBot|WebVulnCrawl\.unknown/1\.0 libwww-perl/5\.803|Wells Search II|WEP Search 00
+
+failregex = ^ -.*"(GET|POST|HEAD).*HTTP.*"(?:%(badbots)s|%(badbotscustom)s)"$
+
+ignoreregex =
+
+datepattern = ^[^\[]*\[({DATE})
+ {^LN-BEG}
+
+# DEV Notes:
+# List of bad bots fetched from http://www.user-agents.org
+# Generated on Thu Nov 7 14:23:35 PST 2013 by files/gen_badbots.
+#
+# Author: Yaroslav Halchenko
diff --git a/packer/jambonz-mini/proxmox/files/nginx-badbots.jail b/packer/jambonz-mini/proxmox/files/nginx-badbots.jail
new file mode 100644
index 0000000..318ebe1
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx-badbots.jail
@@ -0,0 +1,8 @@
+[nginx-badbots]
+
+enabled = true
+port = http,https
+filter = nginx-badbots
+logpath = /var/log/nginx/access.log
+maxretry = 1
+bantime = 86400
diff --git a/packer/jambonz-mini/proxmox/files/nginx-nohome.jail b/packer/jambonz-mini/proxmox/files/nginx-nohome.jail
new file mode 100644
index 0000000..ad80d5a
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx-nohome.jail
@@ -0,0 +1,8 @@
+[nginx-nohome]
+
+enabled = true
+port = http,https
+filter = nginx-nohome
+logpath = /var/log/nginx/access.log
+maxretry = 1
+bantime = 86400
diff --git a/packer/jambonz-mini/proxmox/files/nginx-noproxy.filter b/packer/jambonz-mini/proxmox/files/nginx-noproxy.filter
new file mode 100644
index 0000000..e6742db
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx-noproxy.filter
@@ -0,0 +1,5 @@
+[Definition]
+
+failregex = ^ -.*GET http.*
+
+ignoreregex =
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/nginx-noproxy.jail b/packer/jambonz-mini/proxmox/files/nginx-noproxy.jail
new file mode 100644
index 0000000..0760f23
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx-noproxy.jail
@@ -0,0 +1,8 @@
+[nginx-noproxy]
+
+enabled = true
+port = http,https
+filter = nginx-noproxy
+logpath = /var/log/nginx/access.log
+maxretry = 1
+bantime = 86400
diff --git a/packer/jambonz-mini/proxmox/files/nginx-noscript.filter b/packer/jambonz-mini/proxmox/files/nginx-noscript.filter
new file mode 100644
index 0000000..dddf94d
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx-noscript.filter
@@ -0,0 +1,5 @@
+[Definition]
+
+failregex = ^ -.*GET.*(\.php|\.asp|\.exe|\.pl|\.cgi|\.scgi)
+
+ignoreregex =
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/nginx-noscript.jail b/packer/jambonz-mini/proxmox/files/nginx-noscript.jail
new file mode 100644
index 0000000..a21180d
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx-noscript.jail
@@ -0,0 +1,8 @@
+[nginx-noscript]
+
+enabled = true
+port = http,https
+filter = nginx-noscript
+logpath = /var/log/nginx/access.log
+maxretry = 1
+bantime = 86400
diff --git a/packer/jambonz-mini/proxmox/files/nginx.api b/packer/jambonz-mini/proxmox/files/nginx.api
new file mode 100644
index 0000000..3b79aee
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx.api
@@ -0,0 +1,12 @@
+server {
+ listen 80;
+ server_name api.your_domain.com; # enter the api sub-domain that you set up in DNS (see step 11)
+ location / {
+ proxy_pass http://localhost:3000; # point the reverse proxy to the api server on port 3000
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/nginx.default b/packer/jambonz-mini/proxmox/files/nginx.default
new file mode 100644
index 0000000..4de5bd0
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx.default
@@ -0,0 +1,54 @@
+server {
+ listen 80;
+ server_name _;
+
+ location /api/ {
+ rewrite ^/api/(.*)$ /$1 break;
+ proxy_pass http://localhost:3002;
+ proxy_set_header Host $host;
+ }
+
+ location / {
+ proxy_pass http://localhost:3001;
+ proxy_set_header Host $host;
+ }
+}
+
+server {
+ listen 80;
+ server_name grafana.your_domain.com;
+ location / {
+ proxy_pass http://localhost:3010;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
+
+server {
+ listen 80;
+ server_name homer.your_domain.com;
+ location / {
+ proxy_pass http://localhost:9080;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
+
+server {
+ listen 80;
+ server_name jaeger.your_domain.com;
+ location / {
+ proxy_pass http://localhost:16686;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/nginx.grafana b/packer/jambonz-mini/proxmox/files/nginx.grafana
new file mode 100644
index 0000000..010d3fd
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx.grafana
@@ -0,0 +1,12 @@
+server {
+ listen 80;
+ server_name grafana.your_domain.com;
+ location / {
+ proxy_pass http://localhost:3000;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/nginx.homer b/packer/jambonz-mini/proxmox/files/nginx.homer
new file mode 100644
index 0000000..df03320
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx.homer
@@ -0,0 +1,12 @@
+server {
+ listen 80;
+ server_name homer.your_domain.com;
+ location / {
+ proxy_pass http://localhost:9080;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/nginx.public-apps b/packer/jambonz-mini/proxmox/files/nginx.public-apps
new file mode 100644
index 0000000..2e99ccf
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/nginx.public-apps
@@ -0,0 +1,12 @@
+server {
+ listen 80;
+ server_name public-apps.your_domain.com;
+ location / {
+ proxy_pass http://localhost:3010;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host $host;
+ proxy_cache_bypass $http_upgrade;
+ }
+}
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/rtpengine-recording.ini b/packer/jambonz-mini/proxmox/files/rtpengine-recording.ini
new file mode 100644
index 0000000..2b8d5c8
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/rtpengine-recording.ini
@@ -0,0 +1,50 @@
+[rtpengine-recording]
+
+table = 42
+
+log-level = 5
+
+### number of worker threads (default 8)
+# num-threads = 16
+
+### where to forward to (unix socket)
+# forward-to = /run/rtpengine/sock
+
+### where to store recordings: file (default), db, both
+output-storage = file
+
+### format of stored recordings: wav (default), mp3
+# output-format = mp3
+# output-format = pcma
+output-format = wav
+
+### directory containing rtpengine metadata files
+spool-dir = /var/spool/recording
+
+### where to store media files to
+output-dir = /tmp/recordings
+
+### File name pattern to be used for recording files
+output-pattern = %Y%m%d%H00/rtpengine-%c-%t-M%S%u
+
+### resample all output audio
+resample-to = 8000
+
+### bits per second for MP3 encoding
+# mp3_bitrate = 24000
+
+### mix participating sources into a single output
+output-mixed = true
+
+### create one output file for each source
+# output-single = false
+
+### mix method: "direct" (mix all sources into one stream) or "channels" (one channel per source, multi-channel output)
+mix-method = direct
+
+### mysql configuration for db storage
+# mysql-host = localhost
+# mysql-port = 3306
+# mysql-user = rtpengine
+# mysql-pass = secret
+# mysql-db = rtpengine
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/files/rtpengine-recording.service b/packer/jambonz-mini/proxmox/files/rtpengine-recording.service
new file mode 100644
index 0000000..690ab9d
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/rtpengine-recording.service
@@ -0,0 +1,29 @@
+
+[Unit]
+Description=rtpengine-recording
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+Environment="LD_LIBRARY_PATH=/usr/local/lib/"
+ExecStart=/usr/local/bin/rtpengine-recording --config-file=/etc/rtpengine-recording.ini
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/proxmox/files/rtpengine.gcp.service b/packer/jambonz-mini/proxmox/files/rtpengine.gcp.service
new file mode 100644
index 0000000..2f08f1a
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/rtpengine.gcp.service
@@ -0,0 +1,48 @@
+
+[Unit]
+Description=rtpengine
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+Environment="LD_LIBRARY_PATH=/usr/local/lib/"
+ExecStartPre=/bin/sh -c 'systemctl set-environment LOCAL_IP=`curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip`'
+ExecStartPre=/bin/sh -c 'systemctl set-environment PUBLIC_IP=`curl -s -H "Metadata-Flavor: Google" http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip`'
+ExecStartPre=echo 'del 42' > /proc/rtpengine/control
+ExecStart=/usr/local/bin/rtpengine \
+--interface private/${LOCAL_IP} \
+--interface public/${LOCAL_IP}!${PUBLIC_IP} \
+--listen-ng=22222 \
+--listen-http=8080 \
+--listen-udp=12222 \
+--dtmf-log-dest=127.0.0.1:22223 \
+--listen-cli=127.0.0.1:9900 \
+--table=42 \
+--pidfile /run/rtpengine.pid \
+--port-min 40000 \
+--port-max 60000 \
+--recording-dir /var/spool/recording \
+--recording-method proc \
+--log-level 5 \
+--delete-delay 0
+PIDFile=/run/rtpengine.pid
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/proxmox/files/rtpengine.service b/packer/jambonz-mini/proxmox/files/rtpengine.service
new file mode 100644
index 0000000..31141d2
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/rtpengine.service
@@ -0,0 +1,48 @@
+
+[Unit]
+Description=rtpengine
+After=syslog.target network.target local-fs.target
+
+[Service]
+; service
+Type=forking
+Environment="LD_LIBRARY_PATH=/usr/local/lib/"
+ExecStartPre=/bin/sh -c 'systemctl set-environment LOCAL_IP=$$(/bin/ip -4 addr show eth0 | grep -oP "(?<=inet )\d+(\.\d+){3}" | head -n 1)'
+ExecStartPre=/bin/sh -c "systemctl set-environment PUBLIC_IP=`/usr/bin/curl -s http://ipecho.net/plain`"
+ExecStartPre=echo 'del 42' > /proc/rtpengine/control
+ExecStart=/usr/local/bin/rtpengine \
+--interface private/${LOCAL_IP} \
+--interface public/${PUBLIC_IP} \
+--listen-ng=22222 \
+--listen-http=8080 \
+--listen-udp=12222 \
+--dtmf-log-dest=127.0.0.1:22223 \
+--listen-cli=127.0.0.1:9900 \
+--table=42 \
+--pidfile /run/rtpengine.pid \
+--port-min 40000 \
+--port-max 60000 \
+--recording-dir /var/spool/recording \
+--recording-method proc \
+--log-level 5 \
+--delete-delay 0
+PIDFile=/run/rtpengine.pid
+TimeoutSec=15s
+Restart=always
+; exec
+User=root
+Group=daemon
+LimitCORE=infinity
+LimitNOFILE=100000
+LimitNPROC=60000
+;LimitSTACK=240
+LimitRTPRIO=infinity
+LimitRTTIME=7000000
+IOSchedulingClass=realtime
+IOSchedulingPriority=2
+CPUSchedulingPolicy=rr
+CPUSchedulingPriority=89
+UMask=0007
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packer/jambonz-mini/proxmox/files/switch.conf.xml b/packer/jambonz-mini/proxmox/files/switch.conf.xml
new file mode 100644
index 0000000..0e67ec1
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/switch.conf.xml
@@ -0,0 +1,184 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/packer/jambonz-mini/proxmox/files/switch_core_media.c.patch b/packer/jambonz-mini/proxmox/files/switch_core_media.c.patch
new file mode 100644
index 0000000..3d86c06
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/switch_core_media.c.patch
@@ -0,0 +1,11 @@
+--- switch_core_media.c 2022-03-04 19:02:35.000000000 -0500
++++ switch_core_media.c.new 2022-03-05 08:54:38.000000000 -0500
+@@ -2749,7 +2749,7 @@
+ *(buf + datalen) = '\0';
+
+ while (*buf & 0x80) {
+- if (buf + 3 > e) {
++ if (buf + 3 > e || count >= MAX_RED_FRAMES) {
+ *new_datalen = 0;
+ return 0;
+ }
diff --git a/packer/jambonz-mini/proxmox/files/switch_rtp.c.patch b/packer/jambonz-mini/proxmox/files/switch_rtp.c.patch
new file mode 100644
index 0000000..ee9debf
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/switch_rtp.c.patch
@@ -0,0 +1,40 @@
+--- switch_rtp.c 2021-01-12 02:11:42.334172596 +0000
++++ switch_rtp.c.new 2021-01-12 02:12:42.695207260 +0000
+@@ -5639,8 +5639,8 @@
+ static switch_size_t do_flush(switch_rtp_t *rtp_session, int force, switch_size_t bytes_in)
+ {
+ int was_blocking = 0;
+- switch_size_t bytes;
+- uint32_t flushed = 0;
++ //switch_size_t bytes;
++ //uint32_t flushed = 0;
+ switch_size_t bytes_out = 0;
+
+ if (!switch_rtp_ready(rtp_session)) {
+@@ -5700,7 +5700,7 @@
+ #endif
+ handle_rfc2833(rtp_session, bytes_in, &do_cng);
+ }
+-
++/*
+ do {
+ if (switch_rtp_ready(rtp_session)) {
+ bytes = sizeof(rtp_msg_t);
+@@ -5713,7 +5713,7 @@
+ rtp_session->last_media = switch_micro_time_now();
+ }
+
+- /* Make sure to handle RFC2833 packets, even if we're flushing the packets */
++ //Make sure to handle RFC2833 packets, even if we're flushing the packets
+ if (bytes > rtp_header_len && rtp_session->recv_msg.header.version == 2 && rtp_session->recv_msg.header.pt == rtp_session->recv_te) {
+ rtp_session->last_rtp_hdr = rtp_session->recv_msg.header;
+ handle_rfc2833(rtp_session, bytes, &do_cng);
+@@ -5732,7 +5732,7 @@
+ break;
+ }
+ } while (bytes > 0);
+-
++*/
+ #ifdef DEBUG_2833
+ if (flushed) {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "*** do_flush: total flushed packets: %ld ***\n",(long)flushed);
diff --git a/packer/jambonz-mini/proxmox/files/telegraf.conf b/packer/jambonz-mini/proxmox/files/telegraf.conf
new file mode 100644
index 0000000..3aa5be4
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/telegraf.conf
@@ -0,0 +1,7531 @@
+# Telegraf Configuration
+#
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+#
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+#
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+#
+# Environment variables can be used anywhere in this config file, simply surround
+# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
+# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
+
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+ role = "mini"
+ # dc = "us-east-1" # will tag all metrics with dc=us-east-1
+ # rack = "1a"
+ ## Environment variables can be used as tags, and throughout the config file
+ # user = "$USER"
+
+
+# Configuration for telegraf agent
+[agent]
+ ## Default data collection interval for all inputs
+ interval = "10s"
+ ## Rounds collection interval to 'interval'
+ ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+ round_interval = true
+
+ ## Telegraf will send metrics to outputs in batches of at most
+ ## metric_batch_size metrics.
+ ## This controls the size of writes that Telegraf sends to output plugins.
+ metric_batch_size = 1000
+
+ ## Maximum number of unwritten metrics per output. Increasing this value
+ ## allows for longer periods of output downtime without dropping metrics at the
+ ## cost of higher maximum memory usage.
+ metric_buffer_limit = 10000
+
+ ## Collection jitter is used to jitter the collection by a random amount.
+ ## Each plugin will sleep for a random time within jitter before collecting.
+ ## This can be used to avoid many plugins querying things like sysfs at the
+ ## same time, which can have a measurable effect on the system.
+ collection_jitter = "0s"
+
+ ## Default flushing interval for all outputs. Maximum flush_interval will be
+ ## flush_interval + flush_jitter
+ flush_interval = "10s"
+ ## Jitter the flush interval by a random amount. This is primarily to avoid
+ ## large write spikes for users running a large number of telegraf instances.
+ ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+ flush_jitter = "0s"
+
+ ## By default or when set to "0s", precision will be set to the same
+ ## timestamp order as the collection interval, with the maximum being 1s.
+ ## ie, when interval = "10s", precision will be "1s"
+ ## when interval = "250ms", precision will be "1ms"
+ ## Precision will NOT be used for service inputs. It is up to each individual
+ ## service input to set the timestamp at the appropriate precision.
+ ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
+ precision = ""
+
+ ## Log at debug level.
+ # debug = false
+ ## Log only error level messages.
+ # quiet = false
+
+ ## Log target controls the destination for logs and can be one of "file",
+ ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
+ ## is determined by the "logfile" setting.
+ # logtarget = "file"
+
+ ## Name of the file to be logged to when using the "file" logtarget. If set to
+ ## the empty string then logs are written to stderr.
+ # logfile = ""
+
+ ## The logfile will be rotated after the time interval specified. When set
+ ## to 0 no time based rotation is performed. Logs are rotated only when
+ ## written to, if there is no log activity rotation may be delayed.
+ # logfile_rotation_interval = "0d"
+
+ ## The logfile will be rotated when it becomes larger than the specified
+ ## size. When set to 0 no size based rotation is performed.
+ # logfile_rotation_max_size = "0MB"
+
+ ## Maximum number of rotated archives to keep, any older logs are deleted.
+ ## If set to -1, no archives are removed.
+ # logfile_rotation_max_archives = 5
+
+ ## Override default hostname, if empty use os.Hostname()
+ hostname = ""
+ ## If set to true, do no set the "host" tag in the telegraf agent.
+ omit_hostname = false
+
+
+###############################################################################
+# OUTPUT PLUGINS #
+###############################################################################
+
+
+# Configuration for sending metrics to InfluxDB
+[[outputs.influxdb]]
+ urls = ["http://127.0.0.1:8086/"] # required
+ database = "telegraf" # required
+ retention_policy = "autogen"
+ write_consistency = "any"
+ timeout = "5s"
+ namedrop = ["hep*"]
+
+[[outputs.influxdb]]
+ urls = ["http://127.0.0.1:8086/"] # required
+ database = "homer" # required
+ retention_policy = ""
+ write_consistency = "any"
+ timeout = "5s"
+ namepass = ["hep*"]
+
+ ## The full HTTP or UDP URL for your InfluxDB instance.
+ ##
+ ## Multiple URLs can be specified for a single cluster, only ONE of the
+ ## urls will be written to each interval.
+ # urls = ["unix:///var/run/influxdb.sock"]
+ # urls = ["udp://127.0.0.1:8089"]
+ # urls = ["http://127.0.0.1:8086"]
+
+ ## The target database for metrics; will be created as needed.
+ ## For UDP url endpoint database needs to be configured on server side.
+ # database = "telegraf"
+
+ ## The value of this tag will be used to determine the database. If this
+ ## tag is not set the 'database' option is used as the default.
+ # database_tag = ""
+
+ ## If true, the 'database_tag' will not be included in the written metric.
+ # exclude_database_tag = false
+
+ ## If true, no CREATE DATABASE queries will be sent. Set to true when using
+ ## Telegraf with a user without permissions to create databases or when the
+ ## database already exists.
+ # skip_database_creation = false
+
+ ## Name of existing retention policy to write to. Empty string writes to
+ ## the default retention policy. Only takes effect when using HTTP.
+ # retention_policy = ""
+
+ ## The value of this tag will be used to determine the retention policy. If this
+ ## tag is not set the 'retention_policy' option is used as the default.
+ # retention_policy_tag = ""
+
+ ## If true, the 'retention_policy_tag' will not be included in the written metric.
+ # exclude_retention_policy_tag = false
+
+ ## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
+ ## Only takes effect when using HTTP.
+ # write_consistency = "any"
+
+ ## Timeout for HTTP messages.
+ # timeout = "5s"
+
+ ## HTTP Basic Auth
+ # username = "telegraf"
+ # password = "metricsmetricsmetricsmetrics"
+
+ ## HTTP User-Agent
+ # user_agent = "telegraf"
+
+ ## UDP payload size is the maximum packet size to send.
+ # udp_payload = "512B"
+
+ ## Optional TLS Config for use on HTTP connections.
+ # tls_ca = "/etc/telegraf/ca.pem"
+ # tls_cert = "/etc/telegraf/cert.pem"
+ # tls_key = "/etc/telegraf/key.pem"
+ ## Use TLS but skip chain & host verification
+ # insecure_skip_verify = false
+
+ ## HTTP Proxy override, if unset values the standard proxy environment
+ ## variables are consulted to determine which proxy, if any, should be used.
+ # http_proxy = "http://corporate.proxy:3128"
+
+ ## Additional HTTP headers
+ # http_headers = {"X-Special-Header" = "Special-Value"}
+
+ ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+ ## compress body or "identity" to apply no encoding.
+ # content_encoding = "gzip"
+
+ ## When true, Telegraf will output unsigned integers as unsigned values,
+ ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
+ ## integer values. Enabling this option will result in field type errors if
+ ## existing data has been written.
+ # influx_uint_support = false
+
+
+# # Configuration for Amon Server to send metrics to.
+# [[outputs.amon]]
+# ## Amon Server Key
+# server_key = "my-server-key" # required.
+#
+# ## Amon Instance URL
+# amon_instance = "https://youramoninstance" # required
+#
+# ## Connection timeout.
+# # timeout = "5s"
+
+
+# # Publishes metrics to an AMQP broker
+# [[outputs.amqp]]
+# ## Broker to publish to.
+# ## deprecated in 1.7; use the brokers option
+# # url = "amqp://localhost:5672/influxdb"
+#
+# ## Brokers to publish to. If multiple brokers are specified a random broker
+# ## will be selected anytime a connection is established. This can be
+# ## helpful for load balancing when not using a dedicated load balancer.
+# brokers = ["amqp://localhost:5672/influxdb"]
+#
+# ## Maximum messages to send over a connection. Once this is reached, the
+# ## connection is closed and a new connection is made. This can be helpful for
+# ## load balancing when not using a dedicated load balancer.
+# # max_messages = 0
+#
+# ## Exchange to declare and publish to.
+# exchange = "telegraf"
+#
+# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
+# # exchange_type = "topic"
+#
+# ## If true, exchange will be passively declared.
+# # exchange_passive = false
+#
+# ## Exchange durability can be either "transient" or "durable".
+# # exchange_durability = "durable"
+#
+# ## Additional exchange arguments.
+# # exchange_arguments = { }
+# # exchange_arguments = {"hash_property" = "timestamp"}
+#
+# ## Authentication credentials for the PLAIN auth_method.
+# # username = ""
+# # password = ""
+#
+# ## Auth method. PLAIN and EXTERNAL are supported
+# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+# ## described here: https://www.rabbitmq.com/plugins.html
+# # auth_method = "PLAIN"
+#
+# ## Metric tag to use as a routing key.
+# ## ie, if this tag exists, its value will be used as the routing key
+# # routing_tag = "host"
+#
+# ## Static routing key. Used when no routing_tag is set or as a fallback
+# ## when the tag specified in routing tag is not found.
+# # routing_key = ""
+# # routing_key = "telegraf"
+#
+# ## Delivery Mode controls if a published message is persistent.
+# ## One of "transient" or "persistent".
+# # delivery_mode = "transient"
+#
+# ## InfluxDB database added as a message header.
+# ## deprecated in 1.7; use the headers option
+# # database = "telegraf"
+#
+# ## InfluxDB retention policy added as a message header
+# ## deprecated in 1.7; use the headers option
+# # retention_policy = "default"
+#
+# ## Static headers added to each published message.
+# # headers = { }
+# # headers = {"database" = "telegraf", "retention_policy" = "default"}
+#
+# ## Connection timeout. If not provided, will default to 5s. 0s means no
+# ## timeout (not recommended).
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## If true use batch serialization format instead of line based delimiting.
+# ## Only applies to data formats which are not line based such as JSON.
+# ## Recommended to set to true.
+# # use_batch_format = false
+#
+# ## Content encoding for message payloads, can be set to "gzip" to or
+# ## "identity" to apply no encoding.
+# ##
+# ## Please note that when use_batch_format = false each amqp message contains only
+# ## a single metric, it is recommended to use compression with batch format
+# ## for best results.
+# # content_encoding = "identity"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Send metrics to Azure Application Insights
+# [[outputs.application_insights]]
+# ## Instrumentation key of the Application Insights resource.
+# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
+#
+# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
+# # endpoint_url = "https://dc.services.visualstudio.com/v2/track"
+#
+# ## Timeout for closing (default: 5s).
+# # timeout = "5s"
+#
+# ## Enable additional diagnostic logging.
+# # enable_diagnostic_logging = false
+#
+# ## Context Tag Sources add Application Insights context tags to a tag value.
+# ##
+# ## For list of allowed context tag keys see:
+# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
+# # [outputs.application_insights.context_tag_sources]
+# # "ai.cloud.role" = "kubernetes_container_name"
+# # "ai.cloud.roleInstance" = "kubernetes_pod_name"
+
+
+# # Send aggregate metrics to Azure Monitor
+# [[outputs.azure_monitor]]
+# ## Timeout for HTTP writes.
+# # timeout = "20s"
+#
+# ## Set the namespace prefix, defaults to "Telegraf/".
+# # namespace_prefix = "Telegraf/"
+#
+# ## Azure Monitor doesn't have a string value type, so convert string
+# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
+# ## a maximum of 10 dimensions so Telegraf will only send the first 10
+# ## alphanumeric dimensions.
+# # strings_as_dimensions = false
+#
+# ## Both region and resource_id must be set or be available via the
+# ## Instance Metadata service on Azure Virtual Machines.
+# #
+# ## Azure Region to publish metrics against.
+# ## ex: region = "southcentralus"
+# # region = ""
+# #
+# ## The Azure Resource ID against which metric will be logged, e.g.
+# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/"
+# # resource_id = ""
+#
+# ## Optionally, if in Azure US Government, China or other sovereign
+# ## cloud environment, set appropriate REST endpoint for receiving
+# ## metrics. (Note: region may be unused in this context)
+# # endpoint_url = "https://monitoring.core.usgovcloudapi.net"
+
+
+# # Publish Telegraf metrics to a Google Cloud PubSub topic
+# [[outputs.cloud_pubsub]]
+# ## Required. Name of Google Cloud Platform (GCP) Project that owns
+# ## the given PubSub topic.
+# project = "my-project"
+#
+# ## Required. Name of PubSub topic to publish metrics to.
+# topic = "my-topic"
+#
+# ## Required. Data format to consume.
+# ## Each data format has its own unique set of configuration options.
+# ## Read more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+# ## Application Default Credentials, which is preferred.
+# # credentials_file = "path/to/my/creds.json"
+#
+# ## Optional. If true, will send all metrics per write in one PubSub message.
+# # send_batched = true
+#
+# ## The following publish_* parameters specifically configures batching
+# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
+# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
+#
+# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
+# ## when it has this many PubSub messages. If send_batched is true,
+# ## this is ignored and treated as if it were 1.
+# # publish_count_threshold = 1000
+#
+# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
+# ## when it has this many PubSub messages. If send_batched is true,
+# ## this is ignored and treated as if it were 1
+# # publish_byte_threshold = 1000000
+#
+# ## Optional. Specifically configures requests made to the PubSub API.
+# # publish_num_go_routines = 2
+#
+# ## Optional. Specifies a timeout for requests to the PubSub API.
+# # publish_timeout = "30s"
+#
+# ## Optional. If true, published PubSub message data will be base64-encoded.
+# # base64_data = false
+#
+# ## Optional. PubSub attributes to add to metrics.
+# # [outputs.cloud_pubsub.attributes]
+# # my_attr = "tag_value"
+
+
+# # Configuration for AWS CloudWatch output.
+# [[outputs.cloudwatch]]
+# ## Amazon REGION
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Namespace for the CloudWatch MetricDatums
+# namespace = "InfluxData/Telegraf"
+#
+# ## If you have a large amount of metrics, you should consider to send statistic
+# ## values instead of raw metrics which could not only improve performance but
+# ## also save AWS API cost. If enable this flag, this plugin would parse the required
+# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch.
+# ## You could use basicstats aggregator to calculate those fields. If not all statistic
+# ## fields are available, all fields would still be sent as raw metrics.
+# # write_statistics = false
+#
+# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision)
+# # high_resolution_metrics = false
+
+
+# # Configuration for CrateDB to send metrics to.
+# [[outputs.cratedb]]
+# # A github.com/jackc/pgx connection string.
+# # See https://godoc.org/github.com/jackc/pgx#ParseDSN
+# url = "postgres://user:password@localhost/schema?sslmode=disable"
+# # Timeout for all CrateDB queries.
+# timeout = "5s"
+# # Name of the table to store metrics in.
+# table = "metrics"
+# # If true, and the metrics table does not exist, create it automatically.
+# table_create = true
+
+
+# # Configuration for DataDog API to send metrics to.
+# [[outputs.datadog]]
+# ## Datadog API key
+# apikey = "my-secret-key"
+#
+# ## Connection timeout.
+# # timeout = "5s"
+#
+# ## Write URL override; useful for debugging.
+# # url = "https://app.datadoghq.com/api/v1/series"
+
+
+# # Send metrics to nowhere at all
+# [[outputs.discard]]
+# # no configuration
+
+
+# # Send telegraf metrics to a Dynatrace environment
+# [[outputs.dynatrace]]
+# ## For usage with the Dynatrace OneAgent you can omit any configuration,
+# ## the only requirement is that the OneAgent is running on the same host.
+# ## Only setup environment url and token if you want to monitor a Host without the OneAgent present.
+# ##
+# ## Your Dynatrace environment URL.
+# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default)
+# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest"
+# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest"
+# url = ""
+#
+# ## Your Dynatrace API token.
+# ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API
+# ## The API token needs data ingest scope permission. When using OneAgent, no API token is required.
+# api_token = ""
+#
+# ## Optional prefix for metric names (e.g.: "telegraf.")
+# prefix = "telegraf."
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional flag for ignoring tls certificate check
+# # insecure_skip_verify = false
+#
+#
+# ## Connection timeout, defaults to "5s" if not set.
+# timeout = "5s"
+
+
+# # Configuration for Elasticsearch to send metrics to.
+# [[outputs.elasticsearch]]
+# ## The full HTTP endpoint URL for your Elasticsearch instance
+# ## Multiple urls can be specified as part of the same cluster,
+# ## this means that only ONE of the urls will be written to each interval.
+# urls = [ "http://node1.es.example.com:9200" ] # required.
+# ## Elasticsearch client timeout, defaults to "5s" if not set.
+# timeout = "5s"
+# ## Set to true to ask Elasticsearch a list of all cluster nodes,
+# ## thus it is not necessary to list all nodes in the urls config option.
+# enable_sniffer = false
+# ## Set the interval to check if the Elasticsearch nodes are available
+# ## Setting to "0s" will disable the health check (not recommended in production)
+# health_check_interval = "10s"
+# ## HTTP basic authentication details
+# # username = "telegraf"
+# # password = "mypassword"
+#
+# ## Index Config
+# ## The target index for metrics (Elasticsearch will create if it not exists).
+# ## You can use the date specifiers below to create indexes per time frame.
+# ## The metric timestamp will be used to decide the destination index name
+# # %Y - year (2016)
+# # %y - last two digits of year (00..99)
+# # %m - month (01..12)
+# # %d - day of month (e.g., 01)
+# # %H - hour (00..23)
+# # %V - week of the year (ISO week) (01..53)
+# ## Additionally, you can specify a tag name using the notation {{tag_name}}
+# ## which will be used as part of the index name. If the tag does not exist,
+# ## the default tag value will be used.
+# # index_name = "telegraf-{{host}}-%Y.%m.%d"
+# # default_tag_value = "none"
+# index_name = "telegraf-%Y.%m.%d" # required.
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Template Config
+# ## Set to true if you want telegraf to manage its index template.
+# ## If enabled it will create a recommended index template for telegraf indexes
+# manage_template = true
+# ## The template name used for telegraf indexes
+# template_name = "telegraf"
+# ## Set to true if you want telegraf to overwrite an existing template
+# overwrite_template = false
+# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string
+# ## it will enable data resend and update metric points avoiding duplicated metrics with different ids
+# force_document_id = false
+
+
+# # Send metrics to command as input over stdin
+# [[outputs.exec]]
+# ## Command to ingest metrics via stdin.
+# command = ["tee", "-a", "/dev/null"]
+#
+# ## Timeout for command to complete.
+# # timeout = "5s"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Run executable as long-running output plugin
+# [[outputs.execd]]
+# ## Program to run as daemon
+# command = ["my-telegraf-output", "--some-flag", "value"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+#
+# ## Data format to export.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send telegraf metrics to file(s)
+# [[outputs.file]]
+# ## Files to write to, "stdout" is a specially handled file.
+# files = ["stdout", "/tmp/metrics.out"]
+#
+# ## Use batch serialization format instead of line based delimiting. The
+# ## batch format allows for the production of non line based output formats and
+# ## may more efficiently encode metric groups.
+# # use_batch_format = false
+#
+# ## The file will be rotated after the time interval specified. When set
+# ## to 0 no time based rotation is performed.
+# # rotation_interval = "0d"
+#
+# ## The logfile will be rotated when it becomes larger than the specified
+# ## size. When set to 0 no size based rotation is performed.
+# # rotation_max_size = "0MB"
+#
+# ## Maximum number of rotated archives to keep, any older logs are deleted.
+# ## If set to -1, no archives are removed.
+# # rotation_max_archives = 5
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Configuration for Graphite server to send metrics to
+# [[outputs.graphite]]
+# ## TCP endpoint for your graphite instance.
+# ## If multiple endpoints are configured, output will be load balanced.
+# ## Only one of the endpoints will be written to with each iteration.
+# servers = ["localhost:2003"]
+# ## Prefix metrics name
+# prefix = ""
+# ## Graphite output template
+# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# template = "host.tags.measurement.field"
+#
+# ## Enable Graphite tags support
+# # graphite_tag_support = false
+#
+# ## Character for separating metric name and field for Graphite tags
+# # graphite_separator = "."
+#
+# ## Graphite templates patterns
+# ## 1. Template for cpu
+# ## 2. Template for disk*
+# ## 3. Default template
+# # templates = [
+# # "cpu tags.measurement.host.field",
+# # "disk* measurement.field",
+# # "host.measurement.tags.field"
+# #]
+#
+# ## timeout in seconds for the write connection to graphite
+# timeout = 2
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Send telegraf metrics to graylog
+# [[outputs.graylog]]
+# ## UDP endpoint for your graylog instance.
+# servers = ["127.0.0.1:12201"]
+#
+# ## The field to use as the GELF short_message, if unset the static string
+# ## "telegraf" will be used.
+# ## example: short_message_field = "message"
+# # short_message_field = ""
+
+
+# # Configurable HTTP health check resource based on metrics
+# [[outputs.health]]
+# ## Address and port to listen on.
+# ## ex: service_address = "http://localhost:8080"
+# ## service_address = "unix:///var/run/telegraf-health.sock"
+# # service_address = "http://:8080"
+#
+# ## The maximum duration for reading the entire request.
+# # read_timeout = "5s"
+# ## The maximum duration for writing the entire response.
+# # write_timeout = "5s"
+#
+# ## Username and password to accept for HTTP basic authentication.
+# # basic_username = "user1"
+# # basic_password = "secret"
+#
+# ## Allowed CA certificates for client certificates.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## TLS server certificate and private key.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## One or more check sub-tables should be defined, it is also recommended to
+# ## use metric filtering to limit the metrics that flow into this output.
+# ##
+# ## When using the default buffer sizes, this example will fail when the
+# ## metric buffer is half full.
+# ##
+# ## namepass = ["internal_write"]
+# ## tagpass = { output = ["influxdb"] }
+# ##
+# ## [[outputs.health.compares]]
+# ## field = "buffer_size"
+# ## lt = 5000.0
+# ##
+# ## [[outputs.health.contains]]
+# ## field = "buffer_size"
+
+
+# # A plugin that can transmit metrics over HTTP
+# [[outputs.http]]
+# ## URL is the address to send metrics to
+# url = "http://127.0.0.1:8080/telegraf"
+#
+# ## Timeout for HTTP message
+# # timeout = "5s"
+#
+# ## HTTP method, one of: "POST" or "PUT"
+# # method = "POST"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## OAuth2 Client Credentials Grant
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://identityprovider/oauth2/v1/token"
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Additional HTTP headers
+# # [outputs.http.headers]
+# # # Should be set manually to "application/json" for json data_format
+# # Content-Type = "text/plain; charset=utf-8"
+
+
+# # Configuration for sending metrics to InfluxDB
+# [[outputs.influxdb_v2]]
+# ## The URLs of the InfluxDB cluster nodes.
+# ##
+# ## Multiple URLs can be specified for a single cluster, only ONE of the
+# ## urls will be written to each interval.
+# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
+# urls = ["http://127.0.0.1:8086"]
+#
+# ## Token for authentication.
+# token = ""
+#
+# ## Organization is the name of the organization you wish to write to; must exist.
+# organization = ""
+#
+# ## Destination bucket to write into.
+# bucket = ""
+#
+# ## The value of this tag will be used to determine the bucket. If this
+# ## tag is not set the 'bucket' option is used as the default.
+# # bucket_tag = ""
+#
+# ## If true, the bucket tag will not be added to the metric.
+# # exclude_bucket_tag = false
+#
+# ## Timeout for HTTP messages.
+# # timeout = "5s"
+#
+# ## Additional HTTP headers
+# # http_headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## HTTP Proxy override, if unset values the standard proxy environment
+# ## variables are consulted to determine which proxy, if any, should be used.
+# # http_proxy = "http://corporate.proxy:3128"
+#
+# ## HTTP User-Agent
+# # user_agent = "telegraf"
+#
+# ## Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "gzip"
+#
+# ## Enable or disable uint support for writing uints influxdb 2.0.
+# # influx_uint_support = false
+#
+# ## Optional TLS Config for use on HTTP connections.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Configuration for sending metrics to an Instrumental project
+# [[outputs.instrumental]]
+# ## Project API Token (required)
+# api_token = "API Token" # required
+# ## Prefix the metrics with a given name
+# prefix = ""
+# ## Stats output template (Graphite formatting)
+# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
+# template = "host.tags.measurement.field"
+# ## Timeout in seconds to connect
+# timeout = "2s"
+# ## Display Communication to Instrumental
+# debug = false
+
+
+# # Configuration for the Kafka server to send metrics to
+# [[outputs.kafka]]
+# ## URLs of kafka brokers
+# brokers = ["localhost:9092"]
+# ## Kafka topic for producer messages
+# topic = "telegraf"
+#
+# ## The value of this tag will be used as the topic. If not set the 'topic'
+# ## option is used.
+# # topic_tag = ""
+#
+# ## If true, the 'topic_tag' will be removed from the metric.
+# # exclude_topic_tag = false
+#
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Of particular interest, lz4 compression
+# ## requires at least version 0.10.0.0.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
+# ## Optional topic suffix configuration.
+# ## If the section is omitted, no suffix is used.
+# ## Following topic suffix methods are supported:
+# ## measurement - suffix equals to separator + measurement's name
+# ## tags - suffix equals to separator + specified tags' values
+# ## interleaved with separator
+#
+# ## Suffix equals to "_" + measurement name
+# # [outputs.kafka.topic_suffix]
+# # method = "measurement"
+# # separator = "_"
+#
+# ## Suffix equals to "__" + measurement's "foo" tag value.
+# ## If there is no such tag, suffix equals to an empty string
+# # [outputs.kafka.topic_suffix]
+# # method = "tags"
+# # keys = ["foo"]
+# # separator = "__"
+#
+# ## Suffix equals to "_" + measurement's "foo" and "bar"
+# ## tag values, separated by "_". If there are no such tags,
+# ## their values are treated as empty strings.
+# # [outputs.kafka.topic_suffix]
+# # method = "tags"
+# # keys = ["foo", "bar"]
+# # separator = "_"
+#
+# ## The routing tag specifies a tagkey on the metric whose value is used as
+# ## the message key. The message key is used to determine which partition to
+# ## send the message to. This tag is preferred over the routing_key option.
+# routing_tag = "host"
+#
+# ## The routing key is set as the message key and used to determine which
+# ## partition to send the message to. This value is only used when no
+# ## routing_tag is set or as a fallback when the tag specified in routing tag
+# ## is not found.
+# ##
+# ## If set to "random", a random value will be generated for each message.
+# ##
+# ## When unset, no message key is added and each message is routed to a random
+# ## partition.
+# ##
+# ## ex: routing_key = "random"
+# ## routing_key = "telegraf"
+# # routing_key = ""
+#
+# ## CompressionCodec represents the various compression codecs recognized by
+# ## Kafka in messages.
+# ## 0 : No compression
+# ## 1 : Gzip compression
+# ## 2 : Snappy compression
+# ## 3 : LZ4 compression
+# # compression_codec = 0
+#
+# ## RequiredAcks is used in Produce Requests to tell the broker how many
+# ## replica acknowledgements it must see before responding
+# ## 0 : the producer never waits for an acknowledgement from the broker.
+# ## This option provides the lowest latency but the weakest durability
+# ## guarantees (some data will be lost when a server fails).
+# ## 1 : the producer gets an acknowledgement after the leader replica has
+# ## received the data. This option provides better durability as the
+# ## client waits until the server acknowledges the request as successful
+# ## (only messages that were written to the now-dead leader but not yet
+# ## replicated will be lost).
+# ## -1: the producer gets an acknowledgement after all in-sync replicas have
+# ## received the data. This option provides the best durability, we
+# ## guarantee that no messages will be lost as long as at least one in
+# ## sync replica remains.
+# # required_acks = -1
+#
+# ## The maximum number of times to retry sending a metric before failing
+# ## until the next flush.
+# # max_retry = 3
+#
+# ## The maximum permitted size of a message. Should be set equal to or
+# ## smaller than the broker's 'message.max.bytes'.
+# # max_message_bytes = 1000000
+#
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional SASL Config
+# # sasl_username = "kafka"
+# # sasl_password = "secret"
+#
+# ## SASL protocol version. When connecting to Azure EventHub set to 0.
+# # sasl_version = 1
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Configuration for the AWS Kinesis output.
+# [[outputs.kinesis]]
+# ## Amazon REGION of kinesis endpoint.
+# region = "ap-southeast-2"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Kinesis StreamName must exist prior to starting telegraf.
+# streamname = "StreamName"
+# ## DEPRECATED: PartitionKey as used for sharding data.
+# partitionkey = "PartitionKey"
+# ## DEPRECATED: If set the partitionKey will be a random UUID on every put.
+# ## This allows for scaling across multiple shards in a stream.
+# ## This will cause issues with ordering.
+# use_random_partitionkey = false
+# ## The partition key can be calculated using one of several methods:
+# ##
+# ## Use a static value for all writes:
+# # [outputs.kinesis.partition]
+# # method = "static"
+# # key = "howdy"
+# #
+# ## Use a random partition key on each write:
+# # [outputs.kinesis.partition]
+# # method = "random"
+# #
+# ## Use the measurement name as the partition key:
+# # [outputs.kinesis.partition]
+# # method = "measurement"
+# #
+# ## Use the value of a tag for all writes, if the tag is not set the empty
+# ## default option will be used. When no default, defaults to "telegraf"
+# # [outputs.kinesis.partition]
+# # method = "tag"
+# # key = "host"
+# # default = "mykey"
+#
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+#
+# ## debug will show upstream aws messages.
+# debug = false
+
+
+# # Configuration for Librato API to send metrics to.
+# [[outputs.librato]]
+# ## Librato API Docs
+# ## http://dev.librato.com/v1/metrics-authentication
+# ## Librato API user
+# api_user = "telegraf@influxdb.com" # required.
+# ## Librato API token
+# api_token = "my-secret-token" # required.
+# ## Debug
+# # debug = false
+# ## Connection timeout.
+# # timeout = "5s"
+# ## Output source Template (same as graphite buckets)
+# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
+# ## This template is used in librato's source (not metric's name)
+# template = "host"
+#
+
+
+# # Configuration for MQTT server to send metrics to
+# [[outputs.mqtt]]
+# servers = ["localhost:1883"] # required.
+#
+# ## MQTT outputs send metrics to this topic format
+# ## "<topic_prefix>/<hostname>/<pluginname>"
+# ## ex: prefix/web01.example.com/mem
+# topic_prefix = "telegraf"
+#
+# ## QoS policy for messages
+# ## 0 = at most once
+# ## 1 = at least once
+# ## 2 = exactly once
+# # qos = 2
+#
+# ## username and password to connect MQTT server.
+# # username = "telegraf"
+# # password = "metricsmetricsmetricsmetrics"
+#
+# ## client ID, if not set a random ID is generated
+# # client_id = ""
+#
+# ## Timeout for write operations. default: 5s
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## When true, metrics will be sent in one MQTT message per flush. Otherwise,
+# ## metrics are written one metric per MQTT message.
+# # batch = false
+#
+# ## When true, metric will have RETAIN flag set, making broker cache entries until someone
+# ## actually reads it
+# # retain = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send telegraf measurements to NATS
+# [[outputs.nats]]
+# ## URLs of NATS servers
+# servers = ["nats://localhost:4222"]
+#
+# ## Optional credentials
+# # username = ""
+# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
+# ## NATS subject for producer messages
+# subject = "telegraf"
+#
+# ## Use Transport Layer Security
+# # secure = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send metrics to New Relic metrics endpoint
+# [[outputs.newrelic]]
+# ## New Relic Insights API key
+# insights_key = "insights api key"
+#
+# ## Prefix to add to metric name for easy identification.
+# # metric_prefix = ""
+#
+# ## Timeout for writes to the New Relic API.
+# # timeout = "15s"
+
+
+# # Send telegraf measurements to NSQD
+# [[outputs.nsq]]
+# ## Location of nsqd instance listening on TCP
+# server = "localhost:4150"
+# ## NSQ topic for producer messages
+# topic = "telegraf"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Configuration for OpenTSDB server to send metrics to
+# [[outputs.opentsdb]]
+# ## prefix for metrics keys
+# prefix = "my.specific.prefix."
+#
+# ## DNS name of the OpenTSDB server
+# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
+# ## telnet API. "http://opentsdb.example.com" will use the Http API.
+# host = "opentsdb.example.com"
+#
+# ## Port of the OpenTSDB server
+# port = 4242
+#
+# ## Number of data points to send to OpenTSDB in Http requests.
+# ## Not used with telnet API.
+# http_batch_size = 50
+#
+# ## URI Path for Http requests to OpenTSDB.
+# ## Used in cases where OpenTSDB is located behind a reverse proxy.
+# http_path = "/api/put"
+#
+# ## Debug true - Prints OpenTSDB communication
+# debug = false
+#
+# ## Separator separates measurement name from field
+# separator = "_"
+
+
+# # Configuration for the Prometheus client to spawn
+# [[outputs.prometheus_client]]
+# ## Address to listen on
+# listen = ":9273"
+#
+# ## Metric version controls the mapping from Telegraf metrics into
+# ## Prometheus format. When using the prometheus input, use the same value in
+# ## both plugins to ensure metrics are round-tripped without modification.
+# ##
+# ## example: metric_version = 1; deprecated in 1.13
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## Use HTTP Basic Authentication.
+# # basic_username = "Foo"
+# # basic_password = "Bar"
+#
+# ## If set, the IP Ranges which are allowed to access metrics.
+# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
+# # ip_range = []
+#
+# ## Path to publish the metrics on.
+# # path = "/metrics"
+#
+# ## Expiration interval for each metric. 0 == no expiration
+# # expiration_interval = "60s"
+#
+# ## Collectors to enable, valid entries are "gocollector" and "process".
+# ## If unset, both are enabled.
+# # collectors_exclude = ["gocollector", "process"]
+#
+# ## Send string metrics as Prometheus labels.
+# ## Unless set to false all string metrics will be sent as labels.
+# # string_as_label = true
+#
+# ## If set, enable TLS with the given certificate.
+# # tls_cert = "/etc/ssl/telegraf.crt"
+# # tls_key = "/etc/ssl/telegraf.key"
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Export metric collection time.
+# # export_timestamp = false
+
+
+# # Configuration for the Riemann server to send metrics to
+# [[outputs.riemann]]
+# ## The full TCP or UDP URL of the Riemann server
+# url = "tcp://localhost:5555"
+#
+# ## Riemann event TTL, floating-point time in seconds.
+# ## Defines how long that an event is considered valid for in Riemann
+# # ttl = 30.0
+#
+# ## Separator to use between measurement and field name in Riemann service name
+# ## This does not have any effect if 'measurement_as_attribute' is set to 'true'
+# separator = "/"
+#
+# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
+# # measurement_as_attribute = false
+#
+# ## Send string metrics as Riemann event states.
+# ## Unless enabled all string metrics will be ignored
+# # string_as_state = false
+#
+# ## A list of tag keys whose values get sent as Riemann tags.
+# ## If empty, all Telegraf tag values will be sent as tags
+# # tag_keys = ["telegraf","custom_tag"]
+#
+# ## Additional Riemann tags to send.
+# # tags = ["telegraf-output"]
+#
+# ## Description for Riemann event
+# # description_text = "metrics collected from telegraf"
+#
+# ## Riemann client write timeout, defaults to "5s" if not set.
+# # timeout = "5s"
+
+
+# # Configuration for the Riemann server to send metrics to
+# [[outputs.riemann_legacy]]
+# ## URL of server
+# url = "localhost:5555"
+# ## transport protocol to use either tcp or udp
+# transport = "tcp"
+# ## separator to use between input name and field name in Riemann service name
+# separator = " "
+
+
+# # Generic socket writer capable of handling multiple socket types.
+# [[outputs.socket_writer]]
+# ## URL to connect to
+# # address = "tcp://127.0.0.1:8094"
+# # address = "tcp://example.com:http"
+# # address = "tcp4://127.0.0.1:8094"
+# # address = "tcp6://127.0.0.1:8094"
+# # address = "tcp6://[2001:db8::1]:8094"
+# # address = "udp://127.0.0.1:8094"
+# # address = "udp4://127.0.0.1:8094"
+# # address = "udp6://127.0.0.1:8094"
+# # address = "unix:///tmp/telegraf.sock"
+# # address = "unixgram:///tmp/telegraf.sock"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## Content encoding for packet-based connections (i.e. UDP, unixgram).
+# ## Can be set to "gzip" or to "identity" to apply no encoding.
+# ##
+# # content_encoding = "identity"
+#
+# ## Data format to generate.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+
+
+# # Configuration for Google Cloud Stackdriver to send metrics to
+# [[outputs.stackdriver]]
+# ## GCP Project
+# project = "erudite-bloom-151019"
+#
+# ## The namespace for the metric descriptor
+# namespace = "telegraf"
+#
+# ## Custom resource type
+# # resource_type = "generic_node"
+#
+# ## Additional resource labels
+# # [outputs.stackdriver.resource_labels]
+# # node_id = "$HOSTNAME"
+# # namespace = "myapp"
+# # location = "eu-north0"
+
+
+# # A plugin that can transmit metrics to Sumo Logic HTTP Source
+# [[outputs.sumologic]]
+# ## Unique URL generated for your HTTP Metrics Source.
+# ## This is the address to send metrics to.
+# # url = "https://events.sumologic.net/receiver/v1/http/"
+#
+# ## Data format to be used for sending metrics.
+# ## This will set the "Content-Type" header accordingly.
+# ## Currently supported formats:
+# ## * graphite - for Content-Type of application/vnd.sumologic.graphite
+# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2
+# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus
+# ##
+# ## More information can be found at:
+# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics
+# ##
+# ## NOTE:
+# ## When unset, telegraf will by default use the influx serializer which is currently unsupported
+# ## in HTTP Source.
+# data_format = "carbon2"
+#
+# ## Timeout used for HTTP request
+# # timeout = "5s"
+#
+# ## Max HTTP request body size in bytes before compression (if applied).
+# ## By default 1MB is recommended.
+# ## NOTE:
+# ## Bear in mind that in some serializer a metric even though serialized to multiple
+# ## lines cannot be split any further so setting this very low might not work
+# ## as expected.
+# # max_request_body_size = 1000000
+#
+# ## Additional, Sumo specific options.
+# ## Full list can be found here:
+# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers
+#
+# ## Desired source name.
+# ## Useful if you want to override the source name configured for the source.
+# # source_name = ""
+#
+# ## Desired host name.
+# ## Useful if you want to override the source host configured for the source.
+# # source_host = ""
+#
+# ## Desired source category.
+# ## Useful if you want to override the source category configured for the source.
+# # source_category = ""
+#
+# ## Comma-separated key=value list of dimensions to apply to every metric.
+# ## Custom dimensions will allow you to query your metrics at a more granular level.
+# # dimensions = ""
+
+
+# # Configuration for Syslog server to send metrics to
+# [[outputs.syslog]]
+# ## URL to connect to
+# ## ex: address = "tcp://127.0.0.1:8094"
+# ## ex: address = "tcp4://127.0.0.1:8094"
+# ## ex: address = "tcp6://127.0.0.1:8094"
+# ## ex: address = "tcp6://[2001:db8::1]:8094"
+# ## ex: address = "udp://127.0.0.1:8094"
+# ## ex: address = "udp4://127.0.0.1:8094"
+# ## ex: address = "udp6://127.0.0.1:8094"
+# address = "tcp://127.0.0.1:8094"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## The framing technique with which it is expected that messages are
+# ## transported (default = "octet-counting"). Whether the messages come
+# ## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must
+# ## be one of "octet-counting", "non-transparent".
+# # framing = "octet-counting"
+#
+# ## The trailer to be expected in case of non-transparent framing (default = "LF").
+# ## Must be one of "LF", or "NUL".
+# # trailer = "LF"
+#
+# ## SD-PARAMs settings
+# ## Syslog messages can contain key/value pairs within zero or more
+# ## structured data sections. For each unrecognized metric tag/field a
+# ## SD-PARAMS is created.
+# ##
+# ## Example:
+# ## [[outputs.syslog]]
+# ## sdparam_separator = "_"
+# ## default_sdid = "default@32473"
+# ## sdids = ["foo@123", "bar@456"]
+# ##
+# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
+# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
+#
+# ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
+# # sdparam_separator = "_"
+#
+# ## Default sdid used for tags/fields that don't contain a prefix defined in
+# ## the explicit sdids setting below. If no default is specified, no SD-PARAMs
+# ## will be used for unrecognized fields.
+# # default_sdid = "default@32473"
+#
+# ## List of explicit prefixes to extract from tag/field keys and use as the
+# ## SDID, if they match (see above example for more details):
+# # sdids = ["foo@123", "bar@456"]
+#
+# ## Default severity value. Severity and Facility are used to calculate the
+# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field
+# ## with key "severity_code" is defined. If unset, 5 (notice) is the default
+# # default_severity_code = 5
+#
+# ## Default facility value. Facility and Severity are used to calculate the
+# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
+# ## key "facility_code" is defined. If unset, 1 (user-level) is the default
+# # default_facility_code = 1
+#
+# ## Default APP-NAME value (RFC5424#section-6.2.5)
+# ## Used when no metric tag with key "appname" is defined.
+# ## If unset, "Telegraf" is the default
+# # default_appname = "Telegraf"
+
+
+# # Configuration for Amazon Timestream output.
+# [[outputs.timestream]]
+# ## Amazon Region
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order:
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) Explicit credentials from 'access_key' and 'secret_key'
+# ## 3) Shared profile from 'profile'
+# ## 4) Environment variables
+# ## 5) Shared credentials file
+# ## 6) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Timestream database where the metrics will be inserted.
+# ## The database must exist prior to starting Telegraf.
+# database_name = "yourDatabaseNameHere"
+#
+# ## Specifies if the plugin should describe the Timestream database upon starting
+# ## to validate that it has the necessary permissions, connection, etc., as a safety check.
+# ## If the describe operation fails, the plugin will not start
+# ## and therefore the Telegraf agent will not start.
+# describe_database_on_start = false
+#
+# ## The mapping mode specifies how Telegraf records are represented in Timestream.
+# ## Valid values are: single-table, multi-table.
+# ## For example, consider the following data in line protocol format:
+# ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200
+# ## airquality,location=us-west no2=5,pm25=16 1465839830100400200
+# ## where weather and airquality are the measurement names, location and season are tags,
+# ## and temperature, humidity, no2, pm25 are fields.
+# ## In multi-table mode:
+# ## - first line will be ingested to table named weather
+# ## - second line will be ingested to table named airquality
+# ## - the tags will be represented as dimensions
+# ## - first table (weather) will have two records:
+# ## one with measurement name equals to temperature,
+# ## another with measurement name equals to humidity
+# ## - second table (airquality) will have two records:
+# ## one with measurement name equals to no2,
+# ## another with measurement name equals to pm25
+# ## - the Timestream tables from the example will look like this:
+# ## TABLE "weather":
+# ## time | location | season | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71
+# ## TABLE "airquality":
+# ## time | location | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-west | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | pm25 | 16
+# ## In single-table mode:
+# ## - the data will be ingested to a single table, which name will be valueOf(single_table_name)
+# ##   - measurement name will be stored in a dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name)
+# ## - location and season will be represented as dimensions
+# ## - temperature, humidity, no2, pm25 will be represented as measurement name
+# ## - the Timestream table from the example will look like this:
+# ## Assuming:
+# ## - single_table_name = "my_readings"
+# ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+# ## TABLE "my_readings":
+# ## time | location | season | namespace | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16
+# ## In most cases, using multi-table mapping mode is recommended.
+# ## However, you can consider using single-table in situations when you have thousands of measurement names.
+# mapping_mode = "multi-table"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Specifies the Timestream table where the metrics will be uploaded.
+# # single_table_name = "yourTableNameHere"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Describes what will be the Timestream dimension name for the Telegraf
+# ## measurement name.
+# # single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+#
+# ## Specifies if the plugin should create the table, if the table does not exist.
+# ## The plugin writes the data without prior checking if the table exists.
+# ## When the table does not exist, the error returned from Timestream will cause
+# ## the plugin to create the table, if this parameter is set to true.
+# create_table_if_not_exists = true
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table magnetic store retention period in days.
+# ## Check Timestream documentation for more details.
+# create_table_magnetic_store_retention_period_in_days = 365
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table memory store retention period in hours.
+# ## Check Timestream documentation for more details.
+# create_table_memory_store_retention_period_in_hours = 24
+#
+# ## Only valid and optional if create_table_if_not_exists = true
+# ## Specifies the Timestream table tags.
+# ## Check Timestream documentation for more details
+# # create_table_tags = { "foo" = "bar", "environment" = "dev"}
+
+
+# # Write metrics to Warp 10
+# [[outputs.warp10]]
+# # Prefix to add to the measurement.
+# prefix = "telegraf."
+#
+# # URL of the Warp 10 server
+# warp_url = "http://localhost:8080"
+#
+# # Write token to access your app on warp 10
+# token = "Token"
+#
+# # Warp 10 query timeout
+# # timeout = "15s"
+#
+# ## Print Warp 10 error body
+# # print_error_body = false
+#
+# ## Max string error size
+# # max_string_error_size = 511
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Configuration for Wavefront server to send metrics to
+# [[outputs.wavefront]]
+# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy
+# ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878
+# url = "https://metrics.wavefront.com"
+#
+# ## Authentication Token for Wavefront. Only required if using Direct Ingestion
+# #token = "DUMMY_TOKEN"
+#
+# ## DNS name of the wavefront proxy server. Do not use if url is specified
+# #host = "wavefront.example.com"
+#
+# ## Port that the Wavefront proxy server listens on. Do not use if url is specified
+# #port = 2878
+#
+# ## prefix for metrics keys
+# #prefix = "my.specific.prefix."
+#
+# ## whether to use "value" for name of simple fields. default is false
+# #simple_fields = false
+#
+# ## character to use between metric and field name. default is . (dot)
+# #metric_separator = "."
+#
+# ## Convert metric name paths to use metricSeparator character
+# ## When true will convert all _ (underscore) characters in final metric name. default is true
+# #convert_paths = true
+#
+# ## Use Strict rules to sanitize metric and tag names from invalid characters
+# ## When enabled forward slash (/) and comma (,) will be accepted
+# #use_strict = false
+#
+# ## Use Regex to sanitize metric and tag names from invalid characters
+# ## Regex is more thorough, but significantly slower. default is false
+# #use_regex = false
+#
+# ## point tags to use as the source name for Wavefront (if none found, host will be used)
+# #source_override = ["hostname", "address", "agent_host", "node_host"]
+#
+# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
+# #convert_bool = true
+#
+# ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any
+# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility.
+# #truncate_tags = false
+#
+# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
+# ## deprecated in 1.9; use the enum processor plugin
+# #[[outputs.wavefront.string_to_number.elasticsearch]]
+# # green = 1.0
+# # yellow = 0.5
+# # red = 0.0
+
+
+###############################################################################
+# PROCESSOR PLUGINS #
+###############################################################################
+
+
+# # Clone metrics and apply modifications.
+# [[processors.clone]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.clone.tags]
+# # additional_tag = "tag_value"
+
+
+# # Convert values to another metric value type
+# [[processors.converter]]
+# ## Tags to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ##   <target-type> = [<tag-key>...]
+# [processors.converter.tags]
+# measurement = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+#
+# ## Fields to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ##   <target-type> = [<field-key>...]
+# [processors.converter.fields]
+# measurement = []
+# tag = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+
+
+# # Dates measurements, tags, and fields that pass through this filter.
+# [[processors.date]]
+# ## New tag to create
+# tag_key = "month"
+#
+# ## New field to create (cannot set both field_key and tag_key)
+# # field_key = "month"
+#
+# ## Date format string, must be a representation of the Go "reference time"
+# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
+# date_format = "Jan"
+#
+# ## If destination is a field, date format can also be one of
+# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
+# # date_format = "unix"
+#
+# ## Offset duration added to the date string when writing the new tag.
+# # date_offset = "0s"
+#
+# ## Timezone to use when creating the tag or field using a reference time
+# ## string. This can be set to one of "UTC", "Local", or to a location name
+# ## in the IANA Time Zone database.
+# ## example: timezone = "America/Los_Angeles"
+# # timezone = "UTC"
+
+
+# # Filter metrics with repeating field values
+# [[processors.dedup]]
+# ## Maximum time to suppress output
+# dedup_interval = "600s"
+
+
+# # Defaults sets default value(s) for specified fields that are not set on incoming metrics.
+# [[processors.defaults]]
+# ## Ensures a set of fields always exists on your metric(s) with their
+# ## respective default value.
+# ## For any given field pair (key = default), if it's not set, a field
+# ## is set on the metric with the specified default.
+# ##
+# ## A field is considered not set if it is nil on the incoming metric;
+# ## or it is not nil but its value is an empty string or is a string
+# ## of one or more spaces.
+# ##   <target-field> = <value>
+# # [processors.defaults.fields]
+# # field_1 = "bar"
+# # time_idle = 0
+# # is_error = true
+
+
+# # Map enum values according to given table.
+# [[processors.enum]]
+# [[processors.enum.mapping]]
+# ## Name of the field to map
+# field = "status"
+#
+# ## Name of the tag to map
+# # tag = "status"
+#
+# ## Destination tag or field to be used for the mapped value. By default the
+# ## source tag or field is used, overwriting the original value.
+# dest = "status_code"
+#
+# ## Default value to be used for all values not contained in the mapping
+# ## table. When unset, the unmodified value for the field will be used if no
+# ## match is found.
+# # default = 0
+#
+# ## Table of mappings
+# [processors.enum.mapping.value_mappings]
+# green = 1
+# amber = 2
+# red = 3
+
+
+# # Run executable as long-running processor plugin
+# [[processors.execd]]
+# ## Program to run as daemon
+# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
+# command = ["cat"]
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+
+
+# # Performs file path manipulations on tags and fields
+# [[processors.filepath]]
+# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+# # [[processors.filepath.basename]]
+# # tag = "path"
+# # dest = "basepath"
+#
+# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+# # [[processors.filepath.dirname]]
+# # field = "path"
+#
+# ## Treat the tag value as a path, converting it to its the last element without its suffix
+# # [[processors.filepath.stem]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to the shortest path name equivalent
+# ## to path by purely lexical processing
+# # [[processors.filepath.clean]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to a relative path that is lexically
+# ## equivalent to the source path when joined to 'base_path'
+# # [[processors.filepath.rel]]
+# # tag = "path"
+# # base_path = "/var/log"
+#
+# ## Treat the tag value as a path, replacing each separator character in path with a '/' character. Has only
+# ## effect on Windows
+# # [[processors.filepath.toslash]]
+# # tag = "path"
+
+
+# # Add a tag of the network interface name looked up over SNMP by interface number
+# [[processors.ifname]]
+# ## Name of tag holding the interface number
+# # tag = "ifIndex"
+#
+# ## Name of output tag where service name will be added
+# # dest = "ifName"
+#
+# ## Name of tag of the SNMP agent to request the interface name from
+# # agent = "agent"
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## max_parallel_lookups is the maximum number of SNMP requests to
+# ## make at the same time.
+# # max_parallel_lookups = 100
+#
+# ## ordered controls whether or not the metrics need to stay in the
+# ## same order this plugin received them in. If false, this plugin
+# ## may change the order when data is cached. If you need metrics to
+# ## stay in order set this to true. keeping the metrics ordered may
+# ## be slightly slower
+# # ordered = false
+#
+# ## cache_ttl is the amount of time interface names are cached for a
+# ## given agent. After this period elapses if names are needed they
+# ## will be retrieved again.
+# # cache_ttl = "8h"
+
+
+# # Apply metric modifications using override semantics.
+# [[processors.override]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.override.tags]
+# # additional_tag = "tag_value"
+
+
+# # Parse a value in a specified field/tag(s) and add the result in a new metric
+# [[processors.parser]]
+# ## The name of the fields whose value will be parsed.
+# parse_fields = []
+#
+# ## If true, incoming metrics are not emitted.
+# drop_original = false
+#
+# ## If set to override, emitted metrics will be merged by overriding the
+# ## original metric using the newly parsed metrics.
+# merge = "override"
+#
+# ## The dataformat to be read from files
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Rotate a single valued metric into a multi field metric
+# [[processors.pivot]]
+# ## Tag to use for naming the new field.
+# tag_key = "name"
+# ## Field to use as the value of the new field.
+# value_key = "value"
+
+
+# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file
+# [[processors.port_name]]
+# ## Name of tag holding the port number
+# # tag = "port"
+# ## Or name of the field holding the port number
+# # field = "port"
+#
+# ## Name of output tag or field (depending on the source) where service name will be added
+# # dest = "service"
+#
+# ## Default tcp or udp
+# # default_protocol = "tcp"
+#
+# ## Tag containing the protocol (tcp or udp, case-insensitive)
+# # protocol_tag = "proto"
+#
+# ## Field containing the protocol (tcp or udp, case-insensitive)
+# # protocol_field = "proto"
+
+
+# # Print all metrics that pass through this filter.
+# [[processors.printer]]
+
+
+# # Transforms tag and field values with regex pattern
+# [[processors.regex]]
+# ## Tag and field conversions defined in a separate sub-tables
+# # [[processors.regex.tags]]
+# # ## Tag to change
+# # key = "resp_code"
+# # ## Regular expression to match on a tag value
+# # pattern = "^(\\d)\\d\\d$"
+# # ## Matches of the pattern will be replaced with this string. Use ${1}
+# # ## notation to use the text of the first submatch.
+# # replacement = "${1}xx"
+#
+# # [[processors.regex.fields]]
+# # ## Field to change
+# # key = "request"
+# # ## All the power of the Go regular expressions available here
+# # ## For example, named subgroups
+# #   pattern = "^/api(?P<method>/[\\w/]+)\\S*"
+# # replacement = "${method}"
+# # ## If result_key is present, a new field will be created
+# # ## instead of changing existing field
+# # result_key = "method"
+#
+# ## Multiple conversions may be applied for one field sequentially
+# ## Let's extract one more value
+# # [[processors.regex.fields]]
+# # key = "request"
+# # pattern = ".*category=(\\w+).*"
+# # replacement = "${1}"
+# # result_key = "search_category"
+
+
+# # Rename measurements, tags, and fields that pass through this filter.
+# [[processors.rename]]
+
+
+# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
+# [[processors.reverse_dns]]
+# ## For optimal performance, you may want to limit which metrics are passed to this
+# ## processor. eg:
+# ## namepass = ["my_metric_*"]
+#
+# ## cache_ttl is how long the dns entries should stay cached for.
+# ## generally longer is better, but if you expect a large number of diverse lookups
+# ## you'll want to consider memory use.
+# cache_ttl = "24h"
+#
+# ## lookup_timeout is how long should you wait for a single dns request to respond.
+# ## this is also the maximum acceptable latency for a metric travelling through
+# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+# ## be passed on unaltered.
+# ## multiple simultaneous resolution requests for the same IP will only make a
+# ## single rDNS request, and they will all wait for the answer for this long.
+# lookup_timeout = "3s"
+#
+# ## max_parallel_lookups is the maximum number of dns requests to be in flight
+# ## at the same time. Requests hitting cached values do not count against this
+# ## total, and neither do multiple requests for the same IP.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_lookups = 10
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the field "source_ip", and put the result in the field "source_name"
+# field = "source_ip"
+# dest = "source_name"
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the tag "destination_ip", and put the result in the tag
+# ## "destination_name".
+# tag = "destination_ip"
+# dest = "destination_name"
+#
+# ## If you would prefer destination_name to be a field instead, you can use a
+# ## processors.converter after this one, specifying the order attribute.
+
+
+# # Add the S2 Cell ID as a tag based on latitude and longitude fields
+# [[processors.s2geo]]
+# ## The name of the lat and lon fields containing WGS-84 latitude and
+# ## longitude in decimal degrees.
+# # lat_field = "lat"
+# # lon_field = "lon"
+#
+# ## New tag to create
+# # tag_key = "s2_cell_id"
+#
+# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+# # cell_level = 9
+
+
+# # Process metrics using a Starlark script
+# [[processors.starlark]]
+# ## The Starlark source can be set as a string in this configuration file, or
+# ## by referencing a file containing the script. Only one source or script
+# ## should be set at once.
+# ##
+# ## Source of the Starlark script.
+# source = '''
+# def apply(metric):
+# return metric
+# '''
+#
+# ## File containing a Starlark script.
+# # script = "/usr/local/bin/myscript.star"
+
+
+# # Perform string processing on tags, fields, and measurements
+# [[processors.strings]]
+# ## Convert a tag value to uppercase
+# # [[processors.strings.uppercase]]
+# # tag = "method"
+#
+# ## Convert a field value to lowercase and store in a new field
+# # [[processors.strings.lowercase]]
+# # field = "uri_stem"
+# # dest = "uri_stem_normalised"
+#
+# ## Convert a field value to titlecase
+# # [[processors.strings.titlecase]]
+# # field = "status"
+#
+# ## Trim leading and trailing whitespace using the default cutset
+# # [[processors.strings.trim]]
+# # field = "message"
+#
+# ## Trim leading characters in cutset
+# # [[processors.strings.trim_left]]
+# # field = "message"
+# # cutset = "\t"
+#
+# ## Trim trailing characters in cutset
+# # [[processors.strings.trim_right]]
+# # field = "message"
+# # cutset = "\r\n"
+#
+# ## Trim the given prefix from the field
+# # [[processors.strings.trim_prefix]]
+# # field = "my_value"
+# # prefix = "my_"
+#
+# ## Trim the given suffix from the field
+# # [[processors.strings.trim_suffix]]
+# # field = "read_count"
+# # suffix = "_count"
+#
+# ## Replace all non-overlapping instances of old with new
+# # [[processors.strings.replace]]
+# # measurement = "*"
+# # old = ":"
+# # new = "_"
+#
+# ## Trims strings based on width
+# # [[processors.strings.left]]
+# # field = "message"
+# # width = 10
+#
+# ## Decode a base64 encoded utf-8 string
+# # [[processors.strings.base64decode]]
+# # field = "message"
+
+
+# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
+# [[processors.tag_limit]]
+# ## Maximum number of tags to preserve
+# limit = 10
+#
+# ## List of tags to preferentially preserve
+# keep = ["foo", "bar", "baz"]
+
+
+# # Uses a Go template to create a new tag
+# [[processors.template]]
+# ## Tag to set with the output of the template.
+# tag = "topic"
+#
+# ## Go template used to create the tag value. In order to ease TOML
+# ## escaping requirements, you may wish to use single quotes around the
+# ## template string.
+# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
+
+
+# # Filter metrics, keeping only the top k series over a period of time.
+# [[processors.topk]]
+# ## How many seconds between aggregations
+# # period = 10
+#
+# ## How many top metrics to return
+# # k = 10
+#
+# ## Over which tags should the aggregation be done. Globs can be specified, in
+# ## which case any tag matching the glob will be aggregated over. If set to an
+# ## empty list, no aggregation over tags is done
+# # group_by = ['*']
+#
+# ## Over which fields are the top k are calculated
+# # fields = ["value"]
+#
+# ## What aggregation to use. Options: sum, mean, min, max
+# # aggregation = "mean"
+#
+# ## Instead of the top k largest metrics, return the bottom k lowest metrics
+# # bottomk = false
+#
+# ## The plugin assigns each metric a GroupBy tag generated from its name and
+# ## tags. If this setting is different than "" the plugin will add a
+# ## tag (which name will be the value of this setting) to each metric with
+# ## the value of the calculated GroupBy tag. Useful for debugging
+# # add_groupby_tag = ""
+#
+# ## These settings provide a way to know the position of each metric in
+# ## the top k. The 'add_rank_field' setting allows to specify for which
+# ## fields the position is required. If the list is non empty, then a field
+# ## will be added to each and every metric for each string present in this
+# ## setting. This field will contain the ranking of the group that
+# ## the metric belonged to when aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_rank'
+# # add_rank_fields = []
+#
+# ## These settings provide a way to know what values the plugin is generating
+# ## when aggregating metrics. The 'add_aggregate_field' setting allows to
+# ## specify for which fields the final aggregation value is required. If the
+# ## list is non empty, then a field will be added to each and every metric for
+# ## each field present in this setting. This field will contain
+# ## the computed aggregation for the group that the metric belonged to when
+# ## aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_aggregate'
+# # add_aggregate_fields = []
+
+
+# # Rotate multi field metric into several single field metrics
+# [[processors.unpivot]]
+# ## Tag to use for the name.
+# tag_key = "name"
+# ## Field to use for the name of the value.
+# value_key = "value"
+
+
+###############################################################################
+# AGGREGATOR PLUGINS #
+###############################################################################
+
+
+# # Keep the aggregate basicstats of each metric passing through.
+# [[aggregators.basicstats]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## Configures which basic stats to push as fields
+# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
+
+
+# # Report the final metric of a series
+# [[aggregators.final]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## The time that a series is not updated until considering it final.
+# series_timeout = "5m"
+
+
+# # Create aggregate histograms.
+# [[aggregators.histogram]]
+# ## The period in which to flush the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## If true, the histogram will be reset on flush instead
+# ## of accumulating the results.
+# reset = false
+#
+# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added.
+# ## Defaults to true.
+# cumulative = true
+#
+# ## Example config that aggregates all fields of the metric.
+# # [[aggregators.histogram.config]]
+# # ## Right borders of buckets (with +Inf implicitly added).
+# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
+# # ## The name of metric.
+# # measurement_name = "cpu"
+#
+# ## Example config that aggregates only specific fields of the metric.
+# # [[aggregators.histogram.config]]
+# # ## Right borders of buckets (with +Inf implicitly added).
+# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+# # ## The name of metric.
+# # measurement_name = "diskio"
+# # ## The concrete fields of metric
+# # fields = ["io_time", "read_time", "write_time"]
+
+
+# # Merge metrics into multifield metrics by series key
+# [[aggregators.merge]]
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = true
+
+
+# # Keep the aggregate min/max of each metric passing through.
+# [[aggregators.minmax]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+
+
+# # Count the occurrence of values in fields.
+# [[aggregators.valuecounter]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+# ## The fields for which the values will be counted
+# fields = []
+
+
+###############################################################################
+# INPUT PLUGINS #
+###############################################################################
+
+
+# Read metrics about cpu usage
+[[inputs.cpu]]
+ ## Whether to report per-cpu stats or not
+ percpu = true
+ ## Whether to report total system cpu stats or not
+ totalcpu = true
+ ## If true, collect raw CPU time metrics.
+ collect_cpu_time = false
+ ## If true, compute and report the sum of all non-idle CPU states.
+ report_active = false
+
+
+# Read metrics about disk usage by mount point
+[[inputs.disk]]
+ ## By default stats will be gathered for all mount points.
+ ## Set mount_points will restrict the stats to only the specified mount points.
+ # mount_points = ["/"]
+
+ ## Ignore mount points by filesystem type.
+ ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
+
+
+# Read metrics about disk IO by device
+[[inputs.diskio]]
+ ## By default, telegraf will gather stats for all devices including
+ ## disk partitions.
+ ## Setting devices will restrict the stats to the specified devices.
+ # devices = ["sda", "sdb", "vd*"]
+ ## Uncomment the following line if you need disk serial numbers.
+ # skip_serial_number = false
+ #
+ ## On systems which support it, device metadata can be added in the form of
+ ## tags.
+ ## Currently only Linux is supported via udev properties. You can view
+ ## available properties for a device by running:
+ ## 'udevadm info -q property -n /dev/sda'
+ ## Note: Most, but not all, udev properties can be accessed this way. Properties
+ ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
+ # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
+ #
+ ## Using the same metadata source as device_tags, you can also customize the
+ ## name of the device via templates.
+ ## The 'name_templates' parameter is a list of templates to try and apply to
+ ## the device. The template may contain variables in the form of '$PROPERTY' or
+ ## '${PROPERTY}'. The first template which does not contain any variables not
+ ## present for the device is used as the device name tag.
+ ## The typical use case is for LVM volumes, to get the VG/LV name instead of
+ ## the near-meaningless DM-0 name.
+ # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
+
+
+# Get kernel statistics from /proc/stat
+[[inputs.kernel]]
+ # no configuration
+
+
+# Read metrics about memory usage
+[[inputs.mem]]
+ # no configuration
+
+
+# Get the number of processes and group them by status
+[[inputs.processes]]
+ # no configuration
+
+
+# Read metrics about swap memory usage
+[[inputs.swap]]
+ # no configuration
+
+
+# Read metrics about system load & uptime
+[[inputs.system]]
+ ## Uncomment to remove deprecated metrics.
+ # fielddrop = ["uptime_format"]
+
+
+# # Gather ActiveMQ metrics
+# [[inputs.activemq]]
+# ## ActiveMQ WebConsole URL
+# url = "http://127.0.0.1:8161"
+#
+# ## Required ActiveMQ Endpoint
+# ## deprecated in 1.11; use the url option
+# # server = "127.0.0.1"
+# # port = 8161
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Required ActiveMQ webadmin root path
+# # webadmin = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read stats from aerospike server(s)
+# [[inputs.aerospike]]
+# ## Aerospike servers to connect to (with port)
+# ## This plugin will query all namespaces the aerospike
+# ## server has configured and get stats for them.
+# servers = ["localhost:3000"]
+#
+# # username = "telegraf"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # enable_tls = false
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## If false, skip chain & host verification
+# # insecure_skip_verify = true
+#
+# # Feature Options
+# # Add namespace variable to limit the namespaces executed on
+# # Leave blank to do all
+# # disable_query_namespaces = true # default false
+# # namespaces = ["namespace1", "namespace2"]
+#
+# # Enable set-level telemetry
+# # query_sets = true # default: false
+# # Add namespace set combinations to limit sets executed on
+# # Leave blank to do all sets
+# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
+#
+# # Histograms
+# # enable_ttl_histogram = true # default: false
+# # enable_object_size_linear_histogram = true # default: false
+#
+# # by default, aerospike produces a 100 bucket histogram
+# # this is not great for most graphing tools, this will allow
+# # the ability to squash this to a smaller number of buckets
+# # num_histogram_buckets = 100 # default: 10
+
+
+# # Read Apache status information (mod_status)
+# [[inputs.apache]]
+# ## An array of URLs to gather from, must be directed at the machine
+# ## readable version of the mod_status page including the auto query string.
+# ## Default is "http://localhost/server-status?auto".
+# urls = ["http://localhost/server-status?auto"]
+#
+# ## Credentials for basic HTTP authentication.
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Monitor APC UPSes connected to apcupsd
+# [[inputs.apcupsd]]
+# # A list of running apcupsd server to connect to.
+# # If not provided will default to tcp://127.0.0.1:3551
+# servers = ["tcp://127.0.0.1:3551"]
+#
+# ## Timeout for dialing server.
+# timeout = "5s"
+
+
+# # Gather metrics from Apache Aurora schedulers
+# [[inputs.aurora]]
+# ## Schedulers are the base addresses of your Aurora Schedulers
+# schedulers = ["http://127.0.0.1:8081"]
+#
+# ## Set of role types to collect metrics from.
+# ##
+# ## The scheduler roles are checked each interval by contacting the
+# ## scheduler nodes; zookeeper is not contacted.
+# # roles = ["leader", "follower"]
+#
+# ## Timeout is the max time for total network operations.
+# # timeout = "5s"
+#
+# ## Username and password are sent using HTTP Basic Auth.
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Gather Azure Storage Queue metrics
+# [[inputs.azure_storage_queue]]
+# ## Required Azure Storage Account name
+# account_name = "mystorageaccount"
+#
+# ## Required Azure Storage Account access key
+# account_key = "storageaccountaccesskey"
+#
+# ## Set to false to disable peeking age of oldest message (executes faster)
+# # peek_oldest_message_age = true
+
+
+# # Read metrics of bcache from stats_total and dirty_data
+# [[inputs.bcache]]
+# ## Bcache sets path
+# ## If not specified, then default is:
+# bcachePath = "/sys/fs/bcache"
+#
+# ## By default, telegraf gather stats for all bcache devices
+# ## Setting devices will restrict the stats to the specified
+# ## bcache devices.
+# bcacheDevs = ["bcache0"]
+
+
+# # Collects Beanstalkd server and tubes stats
+# [[inputs.beanstalkd]]
+# ## Server to collect data from
+# server = "localhost:11300"
+#
+# ## List of tubes to gather stats about.
+# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command
+# tubes = ["notifications"]
+
+
+# # Read BIND nameserver XML statistics
+# [[inputs.bind]]
+# ## An array of BIND XML statistics URI to gather stats.
+# ## Default is "http://localhost:8053/xml/v3".
+# # urls = ["http://localhost:8053/xml/v3"]
+# # gather_memory_contexts = false
+# # gather_views = false
+
+
+# # Collect bond interface status, slaves statuses and failures count
+# [[inputs.bond]]
+# ## Sets 'proc' directory path
+# ## If not specified, then default is /proc
+# # host_proc = "/proc"
+#
+# ## By default, telegraf gather stats for all bond interfaces
+# ## Setting interfaces will restrict the stats to the specified
+# ## bond interfaces.
+# # bond_interfaces = ["bond0"]
+
+
+# # Collect Kafka topics and consumers status from Burrow HTTP API.
+# [[inputs.burrow]]
+# ## Burrow API endpoints in format "schema://host:port".
+# ## Default is "http://localhost:8000".
+# servers = ["http://localhost:8000"]
+#
+# ## Override Burrow API prefix.
+# ## Useful when Burrow is behind reverse-proxy.
+# # api_prefix = "/v3/kafka"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Limit per-server concurrent connections.
+# ## Useful in case of large number of topics or consumer groups.
+# # concurrent_connections = 20
+#
+# ## Filter clusters, default is no filtering.
+# ## Values can be specified as glob patterns.
+# # clusters_include = []
+# # clusters_exclude = []
+#
+# ## Filter consumer groups, default is no filtering.
+# ## Values can be specified as glob patterns.
+# # groups_include = []
+# # groups_exclude = []
+#
+# ## Filter topics, default is no filtering.
+# ## Values can be specified as glob patterns.
+# # topics_include = []
+# # topics_exclude = []
+#
+# ## Credentials for basic HTTP authentication.
+# # username = ""
+# # password = ""
+#
+# ## Optional SSL config
+# # ssl_ca = "/etc/telegraf/ca.pem"
+# # ssl_cert = "/etc/telegraf/cert.pem"
+# # ssl_key = "/etc/telegraf/key.pem"
+# # insecure_skip_verify = false
+
+
+# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster.
+# [[inputs.ceph]]
+# ## This is the recommended interval to poll. Too frequent and you will lose
+# ## data points due to timeouts during rebalancing and recovery
+# interval = '1m'
+#
+# ## All configuration values are optional, defaults are shown below
+#
+# ## location of ceph binary
+# ceph_binary = "/usr/bin/ceph"
+#
+# ## directory in which to look for socket files
+# socket_dir = "/var/run/ceph"
+#
+# ## prefix of MON and OSD socket files, used to determine socket type
+# mon_prefix = "ceph-mon"
+# osd_prefix = "ceph-osd"
+# mds_prefix = "ceph-mds"
+# rgw_prefix = "ceph-client"
+#
+# ## suffix used to identify socket files
+# socket_suffix = "asok"
+#
+# ## Ceph user to authenticate as
+# ceph_user = "client.admin"
+#
+# ## Ceph configuration to use to locate the cluster
+# ceph_config = "/etc/ceph/ceph.conf"
+#
+# ## Whether to gather statistics via the admin socket
+# gather_admin_socket_stats = true
+#
+# ## Whether to gather statistics via ceph commands
+# gather_cluster_stats = false
+
+
+# # Read specific statistics per cgroup
+# [[inputs.cgroup]]
+# ## Directories in which to look for files, globs are supported.
+# ## Consider restricting paths to the set of cgroups you really
+# ## want to monitor if you have a large number of cgroups, to avoid
+# ## any cardinality issues.
+# # paths = [
+# # "/sys/fs/cgroup/memory",
+# # "/sys/fs/cgroup/memory/child1",
+# # "/sys/fs/cgroup/memory/child2/*",
+# # ]
+# ## cgroup stat fields, as file names, globs are supported.
+# ## these file names are appended to each path from above.
+# # files = ["memory.*usage*", "memory.limit_in_bytes"]
+
+
+# # Get standard chrony metrics, requires chronyc executable.
+# [[inputs.chrony]]
+# ## If true, chronyc tries to perform a DNS lookup for the time server.
+# # dns_lookup = false
+
+
+# # Pull Metric Statistics from Amazon CloudWatch
+# [[inputs.cloudwatch]]
+# ## Amazon Region
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
+# # access_key = ""
+# # secret_key = ""
+# # token = ""
+# # role_arn = ""
+# # profile = ""
+# # shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
+# # metrics are made available to the 1 minute period. Some are collected at
+# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
+# # Note that if a period is configured that is smaller than the minimum for a
+# # particular metric, that metric will not be returned by the Cloudwatch API
+# # and will not be collected by Telegraf.
+# #
+# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
+# period = "5m"
+#
+# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
+# delay = "5m"
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
+# ## gaps or overlap in pulled data
+# interval = "5m"
+#
+# ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored.
+# ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours.
+# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain.
+# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old.
+# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html
+# #recently_active = "PT3H"
+#
+# ## Configure the TTL for the internal cache of metrics.
+# # cache_ttl = "1h"
+#
+# ## Metric Statistic Namespace (required)
+# namespace = "AWS/ELB"
+#
+# ## Maximum requests per second. Note that the global default AWS rate limit is
+# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
+# ## maximum of 50.
+# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
+# # ratelimit = 25
+#
+# ## Timeout for http requests made by the cloudwatch client.
+# # timeout = "5s"
+#
+# ## Namespace-wide statistic filters. These allow fewer queries to be made to
+# ## cloudwatch.
+# # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+# # statistic_exclude = []
+#
+# ## Metrics to Pull
+# ## Defaults to all Metrics in Namespace if nothing is provided
+# ## Refreshes Namespace available metrics every 1h
+# #[[inputs.cloudwatch.metrics]]
+# # names = ["Latency", "RequestCount"]
+# #
+# # ## Statistic filters for Metric. These allow for retrieving specific
+# # ## statistics for an individual metric.
+# # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
+# # # statistic_exclude = []
+# #
+# # ## Dimension filters for Metric. All dimensions defined for the metric names
+# # ## must be specified in order to retrieve the metric statistics.
+# # [[inputs.cloudwatch.metrics.dimensions]]
+# # name = "LoadBalancerName"
+# # value = "p-example"
+
+
+# # Collects conntrack stats from the configured directories and files.
+# [[inputs.conntrack]]
+# ## The following defaults would work with multiple versions of conntrack.
+# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
+# ## kernel versions, as are the directory locations.
+#
+# ## Superset of filenames to look for within the conntrack dirs.
+# ## Missing files will be ignored.
+# files = ["ip_conntrack_count","ip_conntrack_max",
+# "nf_conntrack_count","nf_conntrack_max"]
+#
+# ## Directories to search within for the conntrack files above.
+# ## Missing directories will be ignored.
+# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
+
+
+# # Gather health check statuses from services registered in Consul
+# [[inputs.consul]]
+# ## Consul server address
+# # address = "localhost:8500"
+#
+# ## URI scheme for the Consul server, one of "http", "https"
+# # scheme = "http"
+#
+# ## Metric version controls the mapping from Consul metrics into
+# ## Telegraf metrics.
+# ##
+# ## example: metric_version = 1; deprecated in 1.15
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## ACL token used in every request
+# # token = ""
+#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
+# ## Data center to query the health checks from
+# # datacenter = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+#
+# ## Consul checks' tag splitting
+# # When tags are formatted like "key:value" with ":" as a delimiter then
+# # they will be split and reported as proper key:value pairs in Telegraf
+# # tag_delimiter = ":"
+
+
+# # Read metrics from one or many couchbase clusters
+# [[inputs.couchbase]]
+# ## specify servers via a url matching:
+# ## [protocol://][:password]@address[:port]
+# ## e.g.
+# ## http://couchbase-0.example.com/
+# ## http://admin:secret@couchbase-0.example.com:8091/
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no protocol is specified, HTTP is used.
+# ## If no port is specified, 8091 is used.
+# servers = ["http://localhost:8091"]
+
+
+# # Read CouchDB Stats from one or more servers
+# [[inputs.couchdb]]
+# ## Works with CouchDB stats endpoints out of the box
+# ## Multiple Hosts from which to read CouchDB stats:
+# hosts = ["http://localhost:8086/_stats"]
+#
+# ## Use HTTP Basic Authentication.
+# # basic_username = "telegraf"
+# # basic_password = "p@ssw0rd"
+
+
+# # Input plugin for DC/OS metrics
+# [[inputs.dcos]]
+# ## The DC/OS cluster URL.
+# cluster_url = "https://dcos-ee-master-1"
+#
+# ## The ID of the service account.
+# service_account_id = "telegraf"
+# ## The private key file for the service account.
+# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
+#
+# ## Path containing login token. If set, will read on every gather.
+# # token_file = "/home/dcos/.dcos/token"
+#
+# ## In all filter options if both include and exclude are empty all items
+# ## will be collected. Arrays may contain glob patterns.
+# ##
+# ## Node IDs to collect metrics from. If a node is excluded, no metrics will
+# ## be collected for its containers or apps.
+# # node_include = []
+# # node_exclude = []
+# ## Container IDs to collect container metrics from.
+# # container_include = []
+# # container_exclude = []
+# ## Container IDs to collect app metrics from.
+# # app_include = []
+# # app_exclude = []
+#
+# ## Maximum concurrent connections to the cluster.
+# # max_connections = 10
+# ## Maximum time to receive a response from cluster.
+# # response_timeout = "20s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## If false, skip chain & host verification
+# # insecure_skip_verify = true
+#
+# ## Recommended filtering to reduce series cardinality.
+# # [inputs.dcos.tagdrop]
+# # path = ["/var/lib/mesos/slave/slaves/*"]
+
+
+# # Read metrics from one or many disque servers
+# [[inputs.disque]]
+# ## An array of URI to gather stats about. Specify an ip or hostname
+# ## with optional port and password.
+# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["localhost"]
+
+
+# # Provide a native collection for dmsetup based statistics for dm-cache
+# [[inputs.dmcache]]
+# ## Whether to report per-device stats or not
+# per_device = true
+
+
+# # Query given DNS server and gives statistics
+# [[inputs.dns_query]]
+# ## servers to query
+# servers = ["8.8.8.8"]
+#
+# ## Network is the network protocol name.
+# # network = "udp"
+#
+# ## Domains or subdomains to query.
+# # domains = ["."]
+#
+# ## Query record type.
+# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
+# # record_type = "A"
+#
+# ## Dns server port.
+# # port = 53
+#
+# ## Query timeout in seconds.
+# # timeout = 2
+
+
+# # Read metrics about docker containers
+# [[inputs.docker]]
+# ## Docker Endpoint
+# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
+# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
+# endpoint = "unix:///var/run/docker.sock"
+#
+# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas)
+# gather_services = false
+#
+# ## Only collect metrics for these containers, collect all if empty
+# container_names = []
+#
+# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+# source_tag = false
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# container_name_include = []
+# container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "running" state will be captured.
+# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+# # container_state_include = []
+# # container_state_exclude = []
+#
+# ## Timeout for docker list, info, and stats commands
+# timeout = "5s"
+#
+# ## Whether to report for each container per-device blkio (8:0, 8:1...) and
+# ## network (eth0, eth1, ...) stats or not
+# perdevice = true
+#
+# ## Whether to report for each container total blkio and network stats or not
+# total = false
+#
+# ## Which environment variables should we use as a tag
+# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
+#
+# ## docker labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# docker_label_include = []
+# docker_label_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read statistics from one or many dovecot servers
+# [[inputs.dovecot]]
+# ## specify dovecot servers via an address:port list
+# ## e.g.
+# ## localhost:24242
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["localhost:24242"]
+#
+# ## Type is one of "user", "domain", "ip", or "global"
+# type = "global"
+#
+# ## Wildcard matches like "*.com". An empty string "" is same as "*"
+# ## If type = "ip" filters should be
+# filters = [""]
+
+
+# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints.
+# [[inputs.ecs]]
+# ## ECS metadata url.
+# ## Metadata v2 API is used if set explicitly. Otherwise,
+# ## v3 metadata endpoint API is used if available.
+# # endpoint_url = ""
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# # container_name_include = []
+# # container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "RUNNING" state will be captured.
+# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
+# ## "RESOURCES_PROVISIONED", "STOPPED".
+# # container_status_include = []
+# # container_status_exclude = []
+#
+# ## ecs labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# ecs_label_include = [ "com.amazonaws.ecs.*" ]
+# ecs_label_exclude = []
+#
+# ## Timeout for queries.
+# # timeout = "5s"
+
+
+# # Read stats from one or more Elasticsearch servers or clusters
+# [[inputs.elasticsearch]]
+# ## specify a list of one or more Elasticsearch servers
+# # you can add username and password to your url to use basic authentication:
+# # servers = ["http://user:pass@localhost:9200"]
+# servers = ["http://localhost:9200"]
+#
+# ## Timeout for HTTP requests to the elastic search server(s)
+# http_timeout = "5s"
+#
+# ## When local is true (the default), the node will read only its own stats.
+# ## Set local to false when you want to read the node stats from all nodes
+# ## of the cluster.
+# local = true
+#
+# ## Set cluster_health to true when you want to also obtain cluster health stats
+# cluster_health = false
+#
+# ## Adjust cluster_health_level when you want to also obtain detailed health stats
+# ## The options are
+# ## - indices (default)
+# ## - cluster
+# # cluster_health_level = "indices"
+#
+# ## Set cluster_stats to true when you want to also obtain cluster stats.
+# cluster_stats = false
+#
+# ## Only gather cluster_stats from the master node. For this to work, local must be set to true
+# cluster_stats_only_from_master = true
+#
+# ## Indices to collect; can be one or more indices names or _all
+# indices_include = ["_all"]
+#
+# ## One of "shards", "cluster", "indices"
+# indices_level = "shards"
+#
+# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
+# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
+# ## "breaker". Per default, all stats are gathered.
+# # node_stats = ["jvm", "http"]
+#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Returns ethtool statistics for given interfaces
+# [[inputs.ethtool]]
+# ## List of interfaces to pull metrics for
+# # interface_include = ["eth0"]
+#
+# ## List of interfaces to ignore when pulling metrics.
+# # interface_exclude = ["eth1"]
+
+
+# # Read metrics from one or more commands that can output to stdout
+# [[inputs.exec]]
+# ## Commands array
+# commands = [
+# "/tmp/test.sh",
+# "/usr/bin/mycollector --foo=bar",
+# "/tmp/collect_*.sh"
+# ]
+#
+# ## Timeout for each command to complete.
+# timeout = "5s"
+#
+# ## measurement name suffix (for separating different commands)
+# name_suffix = "_mycollector"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read metrics from fail2ban.
+# [[inputs.fail2ban]]
+# ## Use sudo to run fail2ban-client
+# use_sudo = false
+
+
+# # Read devices value(s) from a Fibaro controller
+# [[inputs.fibaro]]
+# ## Required Fibaro controller address/hostname.
+# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
+# url = "http://:80"
+#
+# ## Required credentials to access the API (http://)
+# username = ""
+# password = ""
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+
+
+# # Parse a complete file each interval
+# [[inputs.file]]
+# ## Files to parse each interval. Accept standard unix glob matching rules,
+# ## as well as ** to match recursive files and directories.
+# files = ["/tmp/metrics.out"]
+#
+# ## Name a tag containing the name of the file the data was parsed from. Leave empty
+# ## to disable.
+# # file_tag = ""
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
+#
+# ## The dataformat to be read from files
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Count files in a directory
+# [[inputs.filecount]]
+# ## Directory to gather stats about.
+# ## deprecated in 1.9; use the directories option
+# # directory = "/var/cache/apt/archives"
+#
+# ## Directories to gather stats about.
+# ## This accept standard unit glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/** -> recursively find all directories in /var/log and count files in each directories
+# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories
+# ## /var/log -> count all files in /var/log and all of its subdirectories
+# directories = ["/var/cache/apt/archives"]
+#
+# ## Only count files that match the name pattern. Defaults to "*".
+# name = "*.deb"
+#
+# ## Count files in subdirectories. Defaults to true.
+# recursive = false
+#
+# ## Only count regular files. Defaults to true.
+# regular_only = true
+#
+# ## Follow all symlinks while walking the directory tree. Defaults to false.
+# follow_symlinks = false
+#
+# ## Only count files that are at least this size. If size is
+# ## a negative number, only count files that are smaller than the
+# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
+# ## Without quotes and units, interpreted as size in bytes.
+# size = "0B"
+#
+# ## Only count files that have not been touched for at least this
+# ## duration. If mtime is negative, only count files that have been
+# ## touched in this duration. Defaults to "0s".
+# mtime = "0s"
+
+
+# # Read stats about given file(s)
+# [[inputs.filestat]]
+# ## Files to gather stats about.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## "/var/log/**.log" -> recursively find all .log files in /var/log
+# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+# ## "/var/log/apache.log" -> just tail the apache log file
+# ##
+# ## See https://github.com/gobwas/glob for more examples
+# ##
+# files = ["/var/log/**.log"]
+#
+# ## If true, read the entire file and calculate an md5 checksum.
+# md5 = false
+
+
+# # Read real time temps from fireboard.io servers
+# [[inputs.fireboard]]
+# ## Specify auth token for your account
+# auth_token = "invalidAuthToken"
+# ## You can override the fireboard server URL if necessary
+# # url = https://fireboard.io/api/v1/devices.json
+# ## You can set a different http_timeout if you need to
+# ## You should set a string using a number and a time indicator,
+# ## for example "12s" for 12 seconds.
+# # http_timeout = "4s"
+
+
+# # Read metrics exposed by fluentd in_monitor plugin
+# [[inputs.fluentd]]
+# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
+# ##
+# ## Endpoint:
+# ## - only one URI is allowed
+# ## - https is not supported
+# endpoint = "http://localhost:24220/api/plugins.json"
+#
+# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
+# exclude = [
+# "monitor_agent",
+# "dummy",
+# ]
+
+
+# # Gather repository information from GitHub hosted repositories.
+# [[inputs.github]]
+# ## List of repositories to monitor.
+# repositories = [
+# "influxdata/telegraf",
+# "influxdata/influxdb"
+# ]
+#
+# ## Github API access token. Unauthenticated requests are limited to 60 per hour.
+# # access_token = ""
+#
+# ## Github API enterprise url. Github Enterprise accounts must specify their base url.
+# # enterprise_base_url = ""
+#
+# ## Timeout for HTTP requests.
+# # http_timeout = "5s"
+
+
+# # Read flattened metrics from one or more GrayLog HTTP endpoints
+# [[inputs.graylog]]
+# ## API endpoint, currently supported API:
+# ##
+# ## - multiple (Ex http://:12900/system/metrics/multiple)
+# ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace})
+# ##
+# ## For namespace endpoint, the metrics array will be ignored for that call.
+# ## Endpoint can contain namespace and multiple type calls.
+# ##
+# ## Please check http://[graylog-server-ip]:12900/api-browser for full list
+# ## of endpoints
+# servers = [
+# "http://[graylog-server-ip]:12900/system/metrics/multiple",
+# ]
+#
+# ## Metrics list
+# ## List of metrics can be found on Graylog webservice documentation.
+# ## Or by hitting the web service api at:
+# ## http://[graylog-host]:12900/system/metrics
+# metrics = [
+# "jvm.cl.loaded",
+# "jvm.memory.pools.Metaspace.committed"
+# ]
+#
+# ## Username and password
+# username = ""
+# password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics of haproxy, via socket or csv stats page
+# [[inputs.haproxy]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
+# ## Make sure you specify the complete path to the stats endpoint
+# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
+#
+# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
+# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## You can also use local socket with standard wildcard globbing.
+# ## Server address not starting with 'http' will be treated as a possible
+# ## socket, so both examples below are valid.
+# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+#
+# ## By default, some of the fields are renamed from what haproxy calls them.
+# ## Setting this option to true results in the plugin keeping the original
+# ## field names.
+# # keep_field_names = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+# ## By default, telegraf gathers temps data from all disks detected by the
+# ## hddtemp.
+# ##
+# ## Only collect temps from the selected disks.
+# ##
+# ## A * as the device name will return the temperature values of all disks.
+# ##
+# # address = "127.0.0.1:7634"
+# # devices = ["sda", "*"]
+
+
+# # Read formatted metrics from one or more HTTP endpoints
+# [[inputs.http]]
+# ## One or more URLs from which to read formatted metrics
+# urls = [
+# "http://localhost/metrics"
+# ]
+#
+# ## HTTP method
+# # method = "GET"
+#
+# ## Optional HTTP headers
+# # headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## HTTP entity-body to send with POST/PUT requests.
+# # body = ""
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## List of success status codes
+# # success_status_codes = [200]
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+
+
+# # HTTP/HTTPS request given an address a method and a timeout
+# [[inputs.http_response]]
+# ## Deprecated in 1.12, use 'urls'
+# ## Server address (default http://localhost)
+# # address = "http://localhost"
+#
+# ## List of urls to query.
+# # urls = ["http://localhost"]
+#
+# ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
+# # http_proxy = "http://localhost:8888"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## HTTP Request Method
+# # method = "GET"
+#
+# ## Whether to follow redirects from the server (defaults to false)
+# # follow_redirects = false
+#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional HTTP Request Body
+# # body = '''
+# # {'fake':'data'}
+# # '''
+#
+# ## Optional name of the field that will contain the body of the response.
+# ## By default it is set to an empty String indicating that the body's content won't be added
+# # response_body_field = ''
+#
+# ## Maximum allowed HTTP response body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# ## If the response body size exceeds this limit a "body_read_error" will be raised
+# # response_body_max_size = "32MiB"
+#
+# ## Optional substring or regex match in body of the response (case sensitive)
+# # response_string_match = "\"service_status\": \"up\""
+# # response_string_match = "ok"
+# # response_string_match = "\".*_status\".?:.?\"up\""
+#
+# ## Expected response status code.
+# ## The status code of the response is compared to this value. If they match, the field
+# ## "response_status_code_match" will be 1, otherwise it will be 0. If the
+# ## expected status code is 0, the check is disabled and the field won't be added.
+# # response_status_code = 0
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## HTTP Request Headers (all values must be strings)
+# # [inputs.http_response.headers]
+# # Host = "github.com"
+#
+# ## Optional setting to map response http headers into tags
+# ## If the http header is not present on the request, no corresponding tag will be added
+# ## If multiple instances of the http header are present, only the first value will be used
+# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+#
+# ## Interface to use when dialing an address
+# # interface = "eth0"
+
+
+# # Read flattened metrics from one or more JSON HTTP endpoints
+# [[inputs.httpjson]]
+# ## NOTE This plugin only reads numerical measurements, strings and booleans
+# ## will be ignored.
+#
+# ## Name for the service being polled. Will be appended to the name of the
+# ## measurement e.g. httpjson_webserver_stats
+# ##
+# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
+# name = "webserver_stats"
+#
+# ## URL of each server in the service's cluster
+# servers = [
+# "http://localhost:9999/stats/",
+# "http://localhost:9998/stats/",
+# ]
+# ## Set response_timeout (default 5 seconds)
+# response_timeout = "5s"
+#
+# ## HTTP method to use: GET or POST (case-sensitive)
+# method = "GET"
+#
+# ## List of tag names to extract from top-level of JSON server response
+# # tag_keys = [
+# # "my_tag_1",
+# # "my_tag_2"
+# # ]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## HTTP parameters (all values must be strings). For "GET" requests, data
+# ## will be included in the query. For "POST" requests, data will be included
+# ## in the request body as "x-www-form-urlencoded".
+# # [inputs.httpjson.parameters]
+# # event_type = "cpu_spike"
+# # threshold = "0.75"
+#
+# ## HTTP Headers (all values must be strings)
+# # [inputs.httpjson.headers]
+# # X-Auth-Token = "my-xauth-token"
+# # apiVersion = "v1"
+
+
+# # Gather Icinga2 status
+# [[inputs.icinga2]]
+# ## Required Icinga2 server address
+# # server = "https://localhost:5665"
+#
+# ## Required Icinga2 object type ("services" or "hosts")
+# # object_type = "services"
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Gets counters from all InfiniBand cards and ports installed
+# [[inputs.infiniband]]
+# # no configuration
+
+
+# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.influxdb]]
+# ## Works with InfluxDB debug endpoints out of the box,
+# ## but other services can use this format too.
+# ## See the influxdb plugin's README for more details.
+#
+# ## Multiple URLs from which to read InfluxDB-formatted JSON
+# ## Default is "http://localhost:8086/debug/vars".
+# urls = [
+# "http://localhost:8086/debug/vars"
+# ]
+#
+# ## Username and password to send using HTTP Basic Authentication.
+# # username = ""
+# # password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## http request & header timeout
+# timeout = "5s"
+
+
+# # Collect statistics about itself
+# [[inputs.internal]]
+# ## If true, collect telegraf memory stats.
+# # collect_memstats = true
+
+
+# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
+# [[inputs.interrupts]]
+# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
+# ## stored as a field.
+# ##
+# ## The default is false for backwards compatibility, and will be changed to
+# ## true in a future version. It is recommended to set to true on new
+# ## deployments.
+# # cpu_as_tag = false
+#
+# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
+# # [inputs.interrupts.tagdrop]
+# # irq = [ "NET_RX", "TASKLET" ]
+
+
+# # Read metrics from the bare metal servers via IPMI
+# [[inputs.ipmi_sensor]]
+# ## optionally specify the path to the ipmitool executable
+# # path = "/usr/bin/ipmitool"
+# ##
+# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool.
+# ## Sudo must be configured to allow the telegraf user to run ipmitool
+# ## without a password.
+# # use_sudo = false
+# ##
+# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
+# # privilege = "ADMINISTRATOR"
+# ##
+# ## optionally specify one or more servers via a url matching
+# ## [username[:password]@][protocol[(address)]]
+# ## e.g.
+# ## root:passwd@lan(127.0.0.1)
+# ##
+# ## if no servers are specified, local machine sensor stats will be queried
+# ##
+# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
+# ## gaps or overlap in pulled data
+# interval = "30s"
+#
+# ## Timeout for the ipmitool command to complete
+# timeout = "20s"
+#
+# ## Schema Version: (Optional, defaults to version 1)
+# metric_version = 2
+
+
+# # Gather packets and bytes counters from Linux ipsets
+# [[inputs.ipset]]
+# ## By default, we only show sets which have already matched at least 1 packet.
+# ## set include_unmatched_sets = true to gather them all.
+# include_unmatched_sets = false
+# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
+# use_sudo = false
+# ## The default timeout of 1s for ipset execution can be overridden here:
+# # timeout = "1s"
+
+
+# # Gather packets and bytes throughput from iptables
+# [[inputs.iptables]]
+# ## iptables require root access on most systems.
+# ## Setting 'use_sudo' to true will make use of sudo to run iptables.
+# ## Users must configure sudo to allow telegraf user to run iptables with no password.
+# ## iptables can be restricted to only list command "iptables -nvL".
+# use_sudo = false
+# ## Setting 'use_lock' to true runs iptables with the "-w" option.
+# ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvL")
+# use_lock = false
+# ## Define an alternate executable, such as "ip6tables". Default is "iptables".
+# # binary = "ip6tables"
+# ## defines the table to monitor:
+# table = "filter"
+# ## defines the chains to monitor.
+# ## NOTE: iptables rules without a comment will not be monitored.
+# ## Read the plugin documentation for more information.
+# chains = [ "INPUT" ]
+
+
+# # Collect virtual and real server stats from Linux IPVS
+# [[inputs.ipvs]]
+# # no configuration
+
+
+# # Read jobs and cluster metrics from Jenkins instances
+# [[inputs.jenkins]]
+# ## The Jenkins URL in the format "schema://host:port"
+# url = "http://my-jenkins-instance:8080"
+# # username = "admin"
+# # password = "admin"
+#
+# ## Set response_timeout
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Max Job Build Age filter
+# ## Default 1 hour, ignore builds older than max_build_age
+# # max_build_age = "1h"
+#
+# ## Optional Sub Job Depth filter
+# ## Jenkins can have unlimited layer of sub jobs
+# ## This config will limit the layers of pulling, default value 0 means
+# ## unlimited pulling until no more sub jobs
+# # max_subjob_depth = 0
+#
+# ## Optional Sub Job Per Layer
+# ## In workflow-multibranch-plugin, each branch will be created as a sub job.
+# ## This config will limit to call only the latest branches in each layer,
+# ## empty will use default value 10
+# # max_subjob_per_layer = 10
+#
+# ## Jobs to exclude from gathering
+# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
+#
+# ## Nodes to exclude from gathering
+# # node_exclude = [ "node1", "node2" ]
+#
+# ## Worker pool for jenkins plugin only
+# ## Empty this field will use default value 5
+# # max_connections = 5
+
+
+# # Read JMX metrics through Jolokia
+# [[inputs.jolokia]]
+# # DEPRECATED: the jolokia plugin has been deprecated in favor of the
+# # jolokia2 plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
+#
+# ## This is the context root used to compose the jolokia url
+# ## NOTE that Jolokia requires a trailing slash at the end of the context root
+# ## NOTE that your jolokia security policy must allow for POST requests.
+# context = "/jolokia/"
+#
+# ## This specifies the mode used
+# # mode = "proxy"
+# #
+# ## When in proxy mode this section is used to specify further
+# ## proxy address configurations.
+# ## Remember to change host address to fit your environment.
+# # [inputs.jolokia.proxy]
+# # host = "127.0.0.1"
+# # port = "8080"
+#
+# ## Optional http timeouts
+# ##
+# ## response_header_timeout, if non-zero, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request.
+# # response_header_timeout = "3s"
+# ##
+# ## client_timeout specifies a time limit for requests made by this client.
+# ## Includes connection time, any redirects, and reading the response body.
+# # client_timeout = "4s"
+#
+# ## Attribute delimiter
+# ##
+# ## When multiple attributes are returned for a single
+# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
+# ## name, and the attribute name, separated by the given delimiter.
+# # delimiter = "_"
+#
+# ## List of servers exposing jolokia read service
+# [[inputs.jolokia.servers]]
+# name = "as-server-01"
+# host = "127.0.0.1"
+# port = "8080"
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## List of metrics collected on above servers
+# ## Each metric consists in a name, a jmx path and either
+# ## a pass or drop slice attribute.
+# ## This collects all heap memory usage metrics.
+# [[inputs.jolokia.metrics]]
+# name = "heap_memory_usage"
+# mbean = "java.lang:type=Memory"
+# attribute = "HeapMemoryUsage"
+#
+# ## This collects thread count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "thread_count"
+# mbean = "java.lang:type=Threading"
+# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
+#
+# ## This collects the number of classes loaded/unloaded metrics.
+# [[inputs.jolokia.metrics]]
+# name = "class_count"
+# mbean = "java.lang:type=ClassLoading"
+# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
+
+
+# # Read JMX metrics from a Jolokia REST agent endpoint
+# [[inputs.jolokia2_agent]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# # Add agents URLs to query
+# urls = ["http://localhost:8080/jolokia"]
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_agent.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read JMX metrics from a Jolokia REST proxy endpoint
+# [[inputs.jolokia2_proxy]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# ## Proxy agent
+# url = "http://localhost:8080/jolokia"
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add proxy targets to query
+# # default_target_username = ""
+# # default_target_password = ""
+# [[inputs.jolokia2_proxy.target]]
+# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
+# # username = ""
+# # password = ""
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_proxy.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.kapacitor]]
+# ## Multiple URLs from which to read Kapacitor-formatted JSON
+# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
+# urls = [
+# "http://localhost:9092/kapacitor/v1/debug/vars"
+# ]
+#
+# ## Time limit for http requests
+# timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Get kernel statistics from /proc/vmstat
+# [[inputs.kernel_vmstat]]
+# # no configuration
+
+
+# # Read status information from one or more Kibana servers
+# [[inputs.kibana]]
+# ## Specify a list of one or more Kibana servers
+# servers = ["http://localhost:5601"]
+#
+# ## Timeout for HTTP requests
+# timeout = "5s"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the Kubernetes api
+# [[inputs.kube_inventory]]
+# ## URL for the Kubernetes API
+# url = "https://127.0.0.1"
+#
+# ## Namespace to use. Set to "" to use all namespaces.
+# # namespace = "default"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional Resources to exclude from gathering
+# ## Leave them blank to try to gather everything available.
+# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
+# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
+# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional Resources to include when gathering
+# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## selectors to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all selectors as tags
+# ## selector_exclude overrides selector_include if both set.
+# # selector_include = []
+# # selector_exclude = ["*"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the kubernetes kubelet api
+# [[inputs.kubernetes]]
+# ## URL for the kubelet
+# url = "http://127.0.0.1:10255"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Pod labels to be added as tags. An empty array for both include and
+# ## exclude will include all labels.
+# # label_include = []
+# # label_exclude = ["*"]
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from a LeoFS Server via SNMP
+# [[inputs.leofs]]
+# ## An array of URLs of the form:
+# ## host [ ":" port]
+# servers = ["127.0.0.1:4020"]
+
+
+# # Provides Linux sysctl fs metrics
+# [[inputs.linux_sysctl_fs]]
+# # no configuration
+
+
+# # Read metrics exposed by Logstash
+# [[inputs.logstash]]
+# ## The URL of the exposed Logstash API endpoint.
+# url = "http://127.0.0.1:9600"
+#
+# ## Use Logstash 5 single pipeline API, set to true when monitoring
+# ## Logstash 5.
+# # single_pipeline = false
+#
+# ## Enable optional collection components. Can contain
+# ## "pipelines", "process", and "jvm".
+# # collect = ["pipelines", "process", "jvm"]
+#
+# ## Timeout for HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional HTTP Basic Auth credentials.
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Use TLS but skip chain & host verification.
+# # insecure_skip_verify = false
+#
+# ## Optional HTTP headers.
+# # [inputs.logstash.headers]
+# # "X-Special-Header" = "Special-Value"
+
+
+# # Read metrics from local Lustre service on OST, MDS
+# [[inputs.lustre2]]
+# ## An array of /proc globs to search for Lustre stats
+# ## If not specified, the default will work on Lustre 2.5.x
+# ##
+# # ost_procfiles = [
+# # "/proc/fs/lustre/obdfilter/*/stats",
+# # "/proc/fs/lustre/osd-ldiskfs/*/stats",
+# # "/proc/fs/lustre/obdfilter/*/job_stats",
+# # ]
+# # mds_procfiles = [
+# # "/proc/fs/lustre/mdt/*/md_stats",
+# # "/proc/fs/lustre/mdt/*/job_stats",
+# # ]
+
+
+# # Gathers metrics from the /3.0/reports MailChimp API
+# [[inputs.mailchimp]]
+# ## MailChimp API key
+# ## get from https://admin.mailchimp.com/account/api/
+# api_key = "" # required
+# ## Reports for campaigns sent more than days_old ago will not be collected.
+# ## 0 means collect all.
+# days_old = 0
+# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
+# # campaign_id = ""
+
+
+# # Retrieves information on a specific host in a MarkLogic Cluster
+# [[inputs.marklogic]]
+# ## Base URL of the MarkLogic HTTP Server.
+# url = "http://localhost:8002"
+#
+# ## List of specific hostnames to retrieve information. At least (1) required.
+# # hosts = ["hostname1", "hostname2"]
+#
+# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from one or many mcrouter servers
+# [[inputs.mcrouter]]
+# ## An array of address to gather stats about. Specify an ip or hostname
+# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
+# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
+#
+# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
+# # timeout = "5s"
+
+
+# # Read metrics from one or many memcached servers
+# [[inputs.memcached]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port. ie localhost, 10.0.0.1:11211, etc.
+# servers = ["localhost:11211"]
+# # unix_sockets = ["/var/run/memcached.sock"]
+
+
+# # Telegraf plugin for gathering metrics from N Mesos masters
+# [[inputs.mesos]]
+# ## Timeout, in ms.
+# timeout = 100
+#
+# ## A list of Mesos masters.
+# masters = ["http://localhost:5050"]
+#
+# ## Master metrics groups to be collected, by default, all enabled.
+# master_collections = [
+# "resources",
+# "master",
+# "system",
+# "agents",
+# "frameworks",
+# "framework_offers",
+# "tasks",
+# "messages",
+# "evqueue",
+# "registrar",
+# "allocator",
+# ]
+#
+# ## A list of Mesos slaves, default is []
+# # slaves = []
+#
+# ## Slave metrics groups to be collected, by default, all enabled.
+# # slave_collections = [
+# # "resources",
+# # "agent",
+# # "system",
+# # "executors",
+# # "tasks",
+# # "messages",
+# # ]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Collects scores from a Minecraft server's scoreboard using the RCON protocol
+# [[inputs.minecraft]]
+# ## Address of the Minecraft server.
+# # server = "localhost"
+#
+# ## Server RCON Port.
+# # port = "25575"
+#
+# ## Server RCON Password.
+# password = ""
+#
+# ## Uncomment to remove deprecated metric components.
+# # tagdrop = ["server"]
+
+
+# # Retrieve data from MODBUS slave devices
+# [[inputs.modbus]]
+# ## Connection Configuration
+# ##
+# ## The plugin supports connections to PLCs via MODBUS/TCP or
+# ## via serial line communication in binary (RTU) or readable (ASCII) encoding
+# ##
+# ## Device name
+# name = "Device"
+#
+# ## Slave ID - addresses a MODBUS device on the bus
+# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved]
+# slave_id = 1
+#
+# ## Timeout for each request
+# timeout = "1s"
+#
+# ## Maximum number of retries and the time to wait between retries
+# ## when a slave-device is busy.
+# # busy_retries = 0
+# # busy_retries_wait = "100ms"
+#
+# # TCP - connect via Modbus/TCP
+# controller = "tcp://localhost:502"
+#
+# ## Serial (RS485; RS232)
+# # controller = "file:///dev/ttyUSB0"
+# # baud_rate = 9600
+# # data_bits = 8
+# # parity = "N"
+# # stop_bits = 1
+# # transmission_mode = "RTU"
+#
+#
+# ## Measurements
+# ##
+#
+# ## Digital Variables, Discrete Inputs and Coils
+# ## measurement - the (optional) measurement name, defaults to "modbus"
+# ## name - the variable name
+# ## address - variable address
+#
+# discrete_inputs = [
+# { name = "start", address = [0]},
+# { name = "stop", address = [1]},
+# { name = "reset", address = [2]},
+# { name = "emergency_stop", address = [3]},
+# ]
+# coils = [
+# { name = "motor1_run", address = [0]},
+# { name = "motor1_jog", address = [1]},
+# { name = "motor1_stop", address = [2]},
+# ]
+#
+# ## Analog Variables, Input Registers and Holding Registers
+# ## measurement - the (optional) measurement name, defaults to "modbus"
+# ## name - the variable name
+# ## byte_order - the ordering of bytes
+# ## |---AB, ABCD - Big Endian
+# ## |---BA, DCBA - Little Endian
+# ## |---BADC - Mid-Big Endian
+# ## |---CDAB - Mid-Little Endian
+# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation)
+# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input)
+# ## scale - the final numeric variable representation
+# ## address - variable address
+#
+# holding_registers = [
+# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]},
+# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]},
+# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]},
+# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]},
+# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]},
+# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]},
+# ]
+# input_registers = [
+# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]},
+# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
+# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
+# ]
+
+
+# # Read metrics from one or many MongoDB servers
+# [[inputs.mongodb]]
+# ## An array of URLs of the form:
+# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
+# ## For example:
+# ## mongodb://user:auth_key@10.10.3.30:27017,
+# ## mongodb://10.10.3.33:18832,
+# servers = ["mongodb://127.0.0.1:27017"]
+#
+# ## When true, collect cluster status
+# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
+# ## may have an impact on performance.
+# # gather_cluster_status = true
+#
+# ## When true, collect per database stats
+# # gather_perdb_stats = false
+#
+# ## When true, collect per collection stats
+# # gather_col_stats = false
+#
+# ## List of db where collections stats are collected
+# ## If empty, all db are concerned
+# # col_stats_dbs = ["local"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics and status information about processes managed by Monit
+# [[inputs.monit]]
+# ## Monit HTTPD address
+# address = "http://127.0.0.1:2812"
+#
+# ## Username and Password for Monit
+# # username = ""
+# # password = ""
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Aggregates the contents of multiple files into a single point
+# [[inputs.multifile]]
+# ## Base directory where telegraf will look for files.
+# ## Omit this option to use absolute paths.
+# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
+#
+# ## If true, Telegraf discard all data when a single file can't be read.
+# ## Else, Telegraf omits the field generated from this file.
+# # fail_early = true
+#
+# ## Files to parse each interval.
+# [[inputs.multifile.file]]
+# file = "in_pressure_input"
+# dest = "pressure"
+# conversion = "float"
+# [[inputs.multifile.file]]
+# file = "in_temp_input"
+# dest = "temperature"
+# conversion = "float(3)"
+# [[inputs.multifile.file]]
+# file = "in_humidityrelative_input"
+# dest = "humidityrelative"
+# conversion = "float(3)"
+
+
+# # Read metrics from one or many mysql servers
+#[[inputs.mysql]]
+# ## specify servers via a url matching:
+# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
+# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
+# ## e.g.
+# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
+# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
+# #
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["admin:YOUR_DB_PASSWORD@tcp(aurora-cluster-jambonz.cluster-c9hzpr8ulflh.us-west-1.rds.amazonaws.com:3306)/"]
+#
+# ## Selects the metric output format.
+# ##
+# ## This option exists to maintain backwards compatibility, if you have
+# ## existing metrics do not set or change this value until you are ready to
+# ## migrate to the new format.
+# ##
+# ## If you do not have existing metrics from this plugin set to the latest
+# ## version.
+# ##
+# ## Telegraf >=1.6: metric_version = 2
+# ## <1.6: metric_version = 1 (or unset)
+# metric_version = 2
+#
+# ## if the list is empty, then metrics are gathered from all database tables
+# table_schema_databases = ["jambones"]
+#
+# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list
+# # gather_table_schema = false
+#
+# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
+# # gather_process_list = false
+#
+# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
+# # gather_user_statistics = false
+#
+# ## gather auto_increment columns and max values from information schema
+# # gather_info_schema_auto_inc = false
+#
+# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
+# # gather_innodb_metrics = false
+#
+# ## gather metrics from SHOW SLAVE STATUS command output
+# # gather_slave_status = false
+#
+# ## gather metrics from SHOW BINARY LOGS command output
+# # gather_binary_logs = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES
+# # gather_global_variables = true
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
+# # gather_table_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
+# # gather_table_lock_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
+# # gather_index_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
+# # gather_event_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
+# # gather_file_events_stats = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
+# # gather_perf_events_statements = false
+#
+# ## the limits for metrics form perf_events_statements
+# # perf_events_statements_digest_text_limit = 120
+# # perf_events_statements_limit = 250
+# # perf_events_statements_time_limit = 86400
+#
+# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
+# ## example: interval_slow = "30m"
+# # interval_slow = ""
+#
+# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Provides metrics about the state of a NATS server
+# [[inputs.nats]]
+# ## The address of the monitoring endpoint of the NATS server
+# server = "http://localhost:8222"
+#
+# ## Maximum time to receive response
+# # response_timeout = "5s"
+
+
+# # Neptune Apex data collector
+# [[inputs.neptune_apex]]
+# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
+# ## Measurements will be logged under "apex".
+#
+# ## The base URL of the local Apex(es). If you specify more than one server, they will
+# ## be differentiated by the "source" tag.
+# servers = [
+# "http://apex.local",
+# ]
+#
+# ## The response_timeout specifies how long to wait for a reply from the Apex.
+# #response_timeout = "5s"
+
+
+# # Read metrics about network interface usage
+[[inputs.net]]
+# ## By default, telegraf gathers stats from any up interface (excluding loopback)
+# ## Setting interfaces will tell it to gather these explicit interfaces,
+# ## regardless of status.
+# ##
+# # interfaces = ["eth0"]
+# ##
+# ## On linux systems telegraf also collects protocol stats.
+# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
+# ##
+# # ignore_protocol_stats = false
+# ##
+
+
+# # Collect response time of a TCP or UDP connection
+# [[inputs.net_response]]
+# ## Protocol, must be "tcp" or "udp"
+# ## NOTE: because the "udp" protocol does not respond to requests, it requires
+# ## a send/expect string pair (see below).
+# protocol = "tcp"
+# ## Server address (default localhost)
+# address = "localhost:80"
+#
+# ## Set timeout
+# # timeout = "1s"
+#
+# ## Set read timeout (only used if expecting a response)
+# # read_timeout = "1s"
+#
+# ## The following options are required for UDP checks. For TCP, they are
+# ## optional. The plugin will send the given string to the server and then
+# ## expect to receive the given 'expect' string back.
+# ## string sent to the server
+# # send = "ssh"
+# ## expected string in answer
+# # expect = "ssh"
+#
+# ## Uncomment to remove deprecated fields
+# # fielddrop = ["result_type", "string_found"]
+
+
+# # Read TCP metrics such as established, time wait and sockets counts.
+[[inputs.netstat]]
+# # no configuration
+
+
+# # Read Nginx's basic status information (ngx_http_stub_status_module)
+# [[inputs.nginx]]
+# # An array of Nginx stub_status URI to gather stats.
+# urls = ["http://localhost/server_status"]
+#
+# ## Optional TLS Config
+# tls_ca = "/etc/telegraf/ca.pem"
+# tls_cert = "/etc/telegraf/cert.cer"
+# tls_key = "/etc/telegraf/key.key"
+# ## Use TLS but skip chain & host verification
+# insecure_skip_verify = false
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+
+
+# # Read Nginx Plus' full status information (ngx_http_status_module)
+# [[inputs.nginx_plus]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx Plus Api documentation
+# [[inputs.nginx_plus_api]]
+# ## An array of API URI to gather stats.
+# urls = ["http://localhost/api"]
+#
+# # Nginx API version, default: 3
+# # api_version = 3
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx virtual host traffic status module information (nginx-module-sts)
+# [[inputs.nginx_sts]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# ## HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)
+# [[inputs.nginx_upstream_check]]
+# ## An URL where Nginx Upstream check module is enabled
+# ## It should be set to return a JSON formatted response
+# url = "http://127.0.0.1/status?format=json"
+#
+# ## HTTP method
+# # method = "GET"
+#
+# ## Optional HTTP headers
+# # headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## Override HTTP "Host" header
+# # host_header = "check.example.com"
+#
+# ## Timeout for HTTP requests
+# timeout = "5s"
+#
+# ## Optional HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read Nginx virtual host traffic status module information (nginx-module-vts)
+# [[inputs.nginx_vts]]
+# ## An array of ngx_http_status_module or status URI to gather stats.
+# urls = ["http://localhost/status"]
+#
+# ## HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # A plugin to collect stats from the NSD authoritative DNS name server
+# [[inputs.nsd]]
+# ## Address of server to connect to, optionally ':port'. Defaults to the
+# ## address in the nsd config file.
+# server = "127.0.0.1:8953"
+#
+# ## If running as a restricted user you can prepend sudo for additional access:
+# # use_sudo = false
+#
+# ## The default location of the nsd-control binary can be overridden with:
+# # binary = "/usr/sbin/nsd-control"
+#
+# ## The default location of the nsd config file can be overridden with:
+# # config_file = "/etc/nsd/nsd.conf"
+#
+# ## The default timeout of 1s can be overridden with:
+# # timeout = "1s"
+
+
+# # Read NSQ topic and channel statistics.
+# [[inputs.nsq]]
+# ## An array of NSQD HTTP API endpoints
+# endpoints = ["http://localhost:4151"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Collect kernel snmp counters and network interface statistics
+# [[inputs.nstat]]
+# ## file paths for proc files. If empty default paths will be used:
+# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
+# ## These can also be overridden with env variables, see README.
+# proc_net_netstat = "/proc/net/netstat"
+# proc_net_snmp = "/proc/net/snmp"
+# proc_net_snmp6 = "/proc/net/snmp6"
+# ## dump metrics with 0 values too
+# dump_zeros = true
+
+
+# # Get standard NTP query metrics, requires ntpq executable.
+# [[inputs.ntpq]]
+# ## If false, set the -n ntpq flag. Can reduce metric gather time.
+# dns_lookup = true
+
+
+# # Pulls statistics from nvidia GPUs attached to the host
+# [[inputs.nvidia_smi]]
+# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
+# # bin_path = "/usr/bin/nvidia-smi"
+#
+# ## Optional: timeout for GPU polling
+# # timeout = "5s"
+
+
+# # Retrieve data from OPCUA devices
+# [[inputs.opcua]]
+# ## Device name
+# # name = "localhost"
+# #
+# ## OPC UA Endpoint URL
+# # endpoint = "opc.tcp://localhost:4840"
+# #
+# ## Maximum time allowed to establish a connect to the endpoint.
+# # connect_timeout = "10s"
+# #
+# ## Maximum time allowed for a request over the established connection.
+# # request_timeout = "5s"
+# #
+# ## Security policy, one of "None", "Basic128Rsa15", "Basic256",
+# ## "Basic256Sha256", or "auto"
+# # security_policy = "auto"
+# #
+# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto"
+# # security_mode = "auto"
+# #
+# ## Path to cert.pem. Required when security mode or policy isn't "None".
+# ## If cert path is not supplied, self-signed cert and key will be generated.
+# # certificate = "/etc/telegraf/cert.pem"
+# #
+# ## Path to private key.pem. Required when security mode or policy isn't "None".
+# ## If key path is not supplied, self-signed cert and key will be generated.
+# # private_key = "/etc/telegraf/key.pem"
+# #
+# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To
+# ## authenticate using a specific ID, select 'Certificate' or 'UserName'
+# # auth_method = "Anonymous"
+# #
+# ## Username. Required for auth_method = "UserName"
+# # username = ""
+# #
+# ## Password. Required for auth_method = "UserName"
+# # password = ""
+# #
+# ## Node ID configuration
+# ## name - the variable name
+# ## namespace - integer value 0 thru 3
+# ## identifier_type - s=string, i=numeric, g=guid, b=opaque
+# ## identifier - tag as shown in opcua browser
+# ## data_type - boolean, byte, short, int, uint, uint16, int16,
+# ## uint32, int32, float, double, string, datetime, number
+# ## Example:
+# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"}
+# nodes = [
+# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
+# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
+# ]
+
+
+# # OpenLDAP cn=Monitor plugin
+# [[inputs.openldap]]
+# host = "localhost"
+# port = 389
+#
+# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
+# # note that port will likely need to be changed to 636 for ldaps
+# # valid options: "" | "starttls" | "ldaps"
+# tls = ""
+#
+# # skip peer certificate verification. Default is false.
+# insecure_skip_verify = false
+#
+# # Path to PEM-encoded Root certificate to use to verify server certificate
+# tls_ca = "/etc/ssl/certs.pem"
+#
+# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
+# bind_dn = ""
+# bind_password = ""
+#
+# # Reverse metric names so they sort more naturally. Recommended.
+# # This defaults to false if unset, but is set to true when generating a new config
+# reverse_metric_names = true
+
+
+# # Get standard NTP query metrics from OpenNTPD.
+# [[inputs.openntpd]]
+# ## Run ntpctl binary with sudo.
+# # use_sudo = false
+#
+# ## Location of the ntpctl binary.
+# # binary = "/usr/sbin/ntpctl"
+#
+# ## Maximum time the ntpctl binary is allowed to run.
+# # timeout = "5ms"
+
+
+# # A plugin to collect stats from OpenSMTPD - a free implementation of the server-side SMTP protocol
+# [[inputs.opensmtpd]]
+# ## If running as a restricted user you can prepend sudo for additional access:
+# #use_sudo = false
+#
+# ## The default location of the smtpctl binary can be overridden with:
+# binary = "/usr/sbin/smtpctl"
+#
+# ## The default timeout of 1000ms can be overridden with (in milliseconds):
+# timeout = 1000
+
+
+# # Read current weather and forecasts data from openweathermap.org
+# [[inputs.openweathermap]]
+# ## OpenWeatherMap API key.
+# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+#
+# ## City ID's to collect weather data from.
+# city_id = ["5391959"]
+#
+# ## Language of the description field. Can be one of "ar", "bg",
+# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu",
+# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru",
+# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw"
+# # lang = "en"
+#
+# ## APIs to fetch; can contain "weather" or "forecast".
+# fetch = ["weather", "forecast"]
+#
+# ## OpenWeatherMap base URL
+# # base_url = "https://api.openweathermap.org/"
+#
+# ## Timeout for HTTP response.
+# # response_timeout = "5s"
+#
+# ## Preferred unit system for temperature and wind speed. Can be one of
+# ## "metric", "imperial", or "standard".
+# # units = "metric"
+#
+# ## Query interval; OpenWeatherMap updates their weather data every 10
+# ## minutes.
+# interval = "10m"
+
+
+# # Read metrics of passenger using passenger-status
+# [[inputs.passenger]]
+# ## Path of passenger-status.
+# ##
+# ## Plugin gather metric via parsing XML output of passenger-status
+# ## More information about the tool:
+# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
+# ##
+# ## If no path is specified, then the plugin simply execute passenger-status
+# ## hopefully it can be found in your PATH
+# command = "passenger-status -v --show=xml"
+
+
+# # Gather counters from PF
+# [[inputs.pf]]
+# ## PF require root access on most systems.
+# ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
+# ## Users must configure sudo to allow telegraf user to run pfctl with no password.
+# ## pfctl can be restricted to only list command "pfctl -s info".
+# use_sudo = false
+
+
+# # Read metrics of phpfpm, via HTTP status page or socket
+# [[inputs.phpfpm]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port and path
+# ##
+# ## Plugin can be configured in three modes (either can be used):
+# ## - http: the URL must start with http:// or https://, ie:
+# ## "http://localhost/status"
+# ## "http://192.168.130.1/status?full"
+# ##
+# ## - unixsocket: path to fpm socket, ie:
+# ## "/var/run/php5-fpm.sock"
+# ## or using a custom fpm status path:
+# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
+# ##
+# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
+# ## "fcgi://10.0.0.12:9000/status"
+# ## "cgi://10.0.10.12:9001/status"
+# ##
+# ## Example of multiple gathering from local socket and remote host
+# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
+# urls = ["http://localhost/status"]
+#
+# ## Duration allowed to complete HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Ping given url(s) and return statistics
+# [[inputs.ping]]
+# ## Hosts to send ping packets to.
+# urls = ["example.org"]
+#
+# ## Method used for sending pings, can be either "exec" or "native". When set
+# ## to "exec" the systems ping command will be executed. When set to "native"
+# ## the plugin will send pings directly.
+# ##
+# ## While the default is "exec" for backwards compatibility, new deployments
+# ## are encouraged to use the "native" method for improved compatibility and
+# ## performance.
+# # method = "exec"
+#
+# ## Number of ping packets to send per interval. Corresponds to the "-c"
+# ## option of the ping command.
+# # count = 1
+#
+# ## Time to wait between sending ping packets in seconds. Operates like the
+# ## "-i" option of the ping command.
+# # ping_interval = 1.0
+#
+# ## If set, the time to wait for a ping response in seconds. Operates like
+# ## the "-W" option of the ping command.
+# # timeout = 1.0
+#
+# ## If set, the total ping deadline, in seconds. Operates like the -w option
+# ## of the ping command.
+# # deadline = 10
+#
+# ## Interface or source address to send ping from. Operates like the -I or -S
+# ## option of the ping command.
+# # interface = ""
+#
+# ## Specify the ping executable binary.
+# # binary = "ping"
+#
+# ## Arguments for ping command. When arguments is not empty, the command from
+# ## the binary option will be used and other options (ping_interval, timeout,
+# ## etc) will be ignored.
+# # arguments = ["-c", "3"]
+#
+# ## Use only IPv6 addresses when resolving a hostname.
+# # ipv6 = false
+
+
+# # Measure postfix queue statistics
+# [[inputs.postfix]]
+# ## Postfix queue directory. If not provided, telegraf will try to use
+# ## 'postconf -h queue_directory' to determine it.
+# # queue_directory = "/var/spool/postfix"
+
+
+# # Read metrics from one or many PowerDNS servers
+# [[inputs.powerdns]]
+# ## An array of sockets to gather stats about.
+# ## Specify a path to unix socket.
+# unix_sockets = ["/var/run/pdns.controlsocket"]
+
+
+# # Read metrics from one or many PowerDNS Recursor servers
+# [[inputs.powerdns_recursor]]
+# ## Path to the Recursor control socket.
+# unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
+#
+# ## Directory to create receive socket. This default is likely not writable,
+# ## please reference the full plugin documentation for a recommended setup.
+# # socket_dir = "/var/run/"
+# ## Socket permissions for the receive socket.
+# # socket_mode = "0666"
+
+
+# # Monitor process cpu and memory usage
+[[inputs.procstat]]
+ exe = "freeswitch"
+
+[[inputs.procstat]]
+ exe = "rtpengine"
+
+[[inputs.procstat]]
+ exe = "drachtio"
+
+[[inputs.procstat]]
+ exe = "node"
+
+# ## PID file to monitor process
+# pid_file = "/var/run/nginx.pid"
+# ## executable name (ie, pgrep )
+# # exe = "nginx"
+# ## pattern as argument for pgrep (ie, pgrep -f )
+# # pattern = "nginx"
+# ## user as argument for pgrep (ie, pgrep -u )
+# # user = "nginx"
+# ## Systemd unit name
+# # systemd_unit = "nginx.service"
+# ## CGroup name or path
+# # cgroup = "systemd/system.slice/nginx.service"
+#
+# ## Windows service name
+# # win_service = ""
+#
+# ## override for process_name
+# ## This is optional; default is sourced from /proc//status
+# # process_name = "bar"
+#
+# ## Field name prefix
+# # prefix = ""
+#
+# ## When true add the full cmdline as a tag.
+# # cmdline_tag = false
+#
+# ## Add the PID as a tag instead of as a field. When collecting multiple
+# ## processes with otherwise matching tags this setting should be enabled to
+# ## ensure each process has a unique identity.
+# ##
+# ## Enabling this option may result in a large number of series, especially
+# ## when processes have a short lifetime.
+# # pid_tag = false
+#
+# ## Method to use when finding process IDs. Can be one of 'pgrep', or
+# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
+# ## the native finder performs the search directly in a manor dependent on the
+# ## platform. Default is 'pgrep'
+# # pid_finder = "pgrep"
+
+
+# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2).
+# [[inputs.proxmox]]
+# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /.
+# base_url = "https://localhost:8006/api2/json"
+# api_token = "USER@REALM!TOKENID=UUID"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# insecure_skip_verify = false
+#
+# # HTTP response timeout (default: 5s)
+# response_timeout = "5s"
+
+
+# # Reads last_run_summary.yaml file and converts to measurements
+# [[inputs.puppetagent]]
+# ## Location of puppet last run summary file
+# location = "/var/lib/puppet/state/last_run_summary.yaml"
+
+
+# # Reads metrics from RabbitMQ servers via the Management Plugin
+# [[inputs.rabbitmq]]
+# ## Management Plugin url. (default: http://localhost:15672)
+# # url = "http://localhost:15672"
+# ## Tag added to rabbitmq_overview series; deprecated: use tags
+# # name = "rmq-server-1"
+# ## Credentials
+# # username = "guest"
+# # password = "guest"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional request timeouts
+# ##
+# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request.
+# # header_timeout = "3s"
+# ##
+# ## client_timeout specifies a time limit for requests made by this client.
+# ## Includes connection time, any redirects, and reading the response body.
+# # client_timeout = "4s"
+#
+# ## A list of nodes to gather as the rabbitmq_node measurement. If not
+# ## specified, metrics for all nodes are gathered.
+# # nodes = ["rabbit@node1", "rabbit@node2"]
+#
+# ## A list of queues to gather as the rabbitmq_queue measurement. If not
+# ## specified, metrics for all queues are gathered.
+# # queues = ["telegraf"]
+#
+# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
+# ## specified, metrics for all exchanges are gathered.
+# # exchanges = ["telegraf"]
+#
+# ## Queues to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all queues
+# queue_name_include = []
+# queue_name_exclude = []
+#
+# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement.
+# ## If neither are specified, metrics for all federation upstreams are gathered.
+# ## Federation link metrics will only be gathered for queues and exchanges
+# ## whose non-federation metrics will be collected (e.g a queue excluded
+# ## by the 'queue_name_exclude' option will also be excluded from federation).
+# ## Globs accepted.
+# # federation_upstream_include = ["dataCentre-*"]
+# # federation_upstream_exclude = []
+
+
+# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
+# [[inputs.raindrops]]
+# ## An array of raindrops middleware URI to gather stats.
+# urls = ["http://localhost:8080/_raindrops"]
+
+
+# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required).
+# [[inputs.ras]]
+# ## Optional path to RASDaemon sqlite3 database.
+# ## Default: /var/lib/rasdaemon/ras-mc_event.db
+# # db_path = ""
+
+
+# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs
+# [[inputs.redfish]]
+# ## Server url
+# address = "https://127.0.0.1:5000"
+#
+# ## Username, Password for hardware server
+# username = "root"
+# password = "password123456"
+#
+# ## ComputerSystemId
+# computer_system_id="2M220100SL"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from one or many redis servers
+#[[inputs.redis]]
+# servers = ["tcp://jambonz.lpypq4.0001.usw1.cache.amazonaws.com:6379"]
+# ## specify servers via a url matching:
+# ## [protocol://][:password]@address[:port]
+# ## e.g.
+# ## tcp://localhost:6379
+# ## tcp://:password@192.168.99.100
+# ## unix:///var/run/redis.sock
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no port is specified, 6379 is used
+# servers = ["tcp://localhost:6379"]
+#
+# ## Optional. Specify redis commands to retrieve values
+# # [[inputs.redis.commands]]
+# # command = ["get", "sample-key"]
+# # field = "sample-key-value"
+# # type = "string"
+#
+# ## specify server password
+# # password = "s#cr@t%"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Read metrics from one or many RethinkDB servers
+# [[inputs.rethinkdb]]
+# ## An array of URI to gather stats about. Specify an ip or hostname
+# ## with optional port add password. ie,
+# ## rethinkdb://user:auth_key@10.10.3.30:28105,
+# ## rethinkdb://10.10.3.33:18832,
+# ## 10.0.0.1:10000, etc.
+# servers = ["127.0.0.1:28015"]
+# ##
+# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization,
+# ## protocol have to be named "rethinkdb2" - it will use 1_0 H.
+# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
+# ##
+# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol
+# ## have to be named "rethinkdb".
+# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
+
+
+# # Read metrics one or many Riak servers
+# [[inputs.riak]]
+# # Specify a list of one or more riak http servers
+# servers = ["http://localhost:8098"]
+
+
+# # Read API usage and limits for a Salesforce organisation
+# [[inputs.salesforce]]
+# ## specify your credentials
+# ##
+# username = "your_username"
+# password = "your_password"
+# ##
+# ## (optional) security token
+# # security_token = "your_security_token"
+# ##
+# ## (optional) environment type (sandbox or production)
+# ## default is: production
+# ##
+# # environment = "production"
+# ##
+# ## (optional) API version (default: "39.0")
+# ##
+# # version = "39.0"
+
+
+# # Monitor sensors, requires lm-sensors package
+# [[inputs.sensors]]
+# ## Remove numbers from field names.
+# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
+# # remove_numbers = true
+#
+# ## Timeout is the maximum amount of time that the sensors command can run.
+# # timeout = "5s"
+
+
+# # Read metrics from storage devices supporting S.M.A.R.T.
+# [[inputs.smart]]
+# ## Optionally specify the path to the smartctl executable
+# # path_smartctl = "/usr/bin/smartctl"
+#
+# ## Optionally specify the path to the nvme-cli executable
+# # path_nvme = "/usr/bin/nvme"
+#
+# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case
+# ## ["auto-on"] - automatically find and enable additional vendor specific disk info
+# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info
+# # enable_extensions = ["auto-on"]
+#
+# ## On most platforms used cli utilities requires root access.
+# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli.
+# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli
+# ## without a password.
+# # use_sudo = false
+#
+# ## Skip checking disks in this power mode. Defaults to
+# ## "standby" to not wake up disks that have stopped rotating.
+# ## See --nocheck in the man pages for smartctl.
+# ## smartctl version 5.41 and 5.42 have faulty detection of
+# ## power mode and might require changing this value to
+# ## "never" depending on your disks.
+# # nocheck = "standby"
+#
+# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
+# ## information from each drive into the 'smart_attribute' measurement.
+# # attributes = false
+#
+# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed.
+# # excludes = [ "/dev/pass6" ]
+#
+# ## Optionally specify devices and device type, if unset
+# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done
+# ## and all found will be included except for the excluded in excludes.
+# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"]
+#
+# ## Timeout for the cli command to complete.
+# # timeout = "30s"
+
+
+# # Retrieves SNMP values from remote agents
+# [[inputs.snmp]]
+# ## Agent addresses to retrieve values from.
+# ## example: agents = ["udp://127.0.0.1:161"]
+# ## agents = ["tcp://127.0.0.1:161"]
+# agents = ["udp://127.0.0.1:161"]
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## Agent host tag; the tag used to reference the source host
+# # agent_host_tag = "agent_host"
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## Add fields and tables defining the variables you wish to collect. This
+# ## example collects the system uptime and interface variables. Reference the
+# ## full plugin documentation for configuration details.
+
+
+# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
+# [[inputs.snmp_legacy]]
+# ## Use 'oids.txt' file to translate oids to names
+# ## To generate 'oids.txt' you need to run:
+# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
+# ## Or if you have another MIB folder with custom MIBs
+# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
+# snmptranslate_file = "/tmp/oids.txt"
+# [[inputs.snmp.host]]
+# address = "192.168.2.2:161"
+# # SNMP community
+# community = "public" # default public
+# # SNMP version (1, 2 or 3)
+# # Version 3 not supported yet
+# version = 2 # default 2
+# # SNMP response timeout
+# timeout = 2.0 # default 2.0
+# # SNMP request retries
+# retries = 2 # default 2
+# # Which get/bulk do you want to collect for this host
+# collect = ["mybulk", "sysservices", "sysdescr"]
+# # Simple list of OIDs to get, in addition to "collect"
+# get_oids = []
+#
+# [[inputs.snmp.host]]
+# address = "192.168.2.3:161"
+# community = "public"
+# version = 2
+# timeout = 2.0
+# retries = 2
+# collect = ["mybulk"]
+# get_oids = [
+# "ifNumber",
+# ".1.3.6.1.2.1.1.3.0",
+# ]
+#
+# [[inputs.snmp.get]]
+# name = "ifnumber"
+# oid = "ifNumber"
+#
+# [[inputs.snmp.get]]
+# name = "interface_speed"
+# oid = "ifSpeed"
+# instance = "0"
+#
+# [[inputs.snmp.get]]
+# name = "sysuptime"
+# oid = ".1.3.6.1.2.1.1.3.0"
+# unit = "second"
+#
+# [[inputs.snmp.bulk]]
+# name = "mybulk"
+# max_repetition = 127
+# oid = ".1.3.6.1.2.1.1"
+#
+# [[inputs.snmp.bulk]]
+# name = "ifoutoctets"
+# max_repetition = 127
+# oid = "ifOutOctets"
+#
+# [[inputs.snmp.host]]
+# address = "192.168.2.13:161"
+# #address = "127.0.0.1:161"
+# community = "public"
+# version = 2
+# timeout = 2.0
+# retries = 2
+# #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
+# collect = ["sysuptime" ]
+# [[inputs.snmp.host.table]]
+# name = "iftable3"
+# include_instances = ["enp5s0", "eth1"]
+#
+# # SNMP TABLEs
+# # table without mapping neither subtables
+# [[inputs.snmp.table]]
+# name = "iftable1"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+#
+# # table without mapping but with subtables
+# [[inputs.snmp.table]]
+# name = "iftable2"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
+#
+# # table with mapping but without subtables
+# [[inputs.snmp.table]]
+# name = "iftable3"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# # if empty, get all instances
+# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+# # if empty, get all subtables
+#
+# # table with both mapping and subtables
+# [[inputs.snmp.table]]
+# name = "iftable4"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# # if empty get all instances
+# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+# # if empty get all subtables
+# # sub_tables could be not "real subtables"
+# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
+
+
+# # Read stats from one or more Solr servers or cores
+# [[inputs.solr]]
+# ## specify a list of one or more Solr servers
+# servers = ["http://localhost:8983"]
+#
+# ## specify a list of one or more Solr cores (default - all)
+# # cores = ["main"]
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+
+
+# # Read metrics from Microsoft SQL Server
+# [[inputs.sqlserver]]
+# ## Specify instances to monitor with a list of connection strings.
+# ## All connection parameters are optional.
+# ## By default, the host is localhost, listening on default port, TCP 1433.
+# ## for Windows, the user is the currently running AD user (SSO).
+# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
+# ## parameters, in particular, tls connections can be created like so:
+# ## "encrypt=true;certificate=;hostNameInCertificate="
+# # servers = [
+# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
+# # ]
+#
+# ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
+# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
+# ## Possible values for database_type are
+# ## "AzureSQLDB"
+# ## "SQLServer"
+# ## "AzureSQLManagedInstance"
+# # database_type = "AzureSQLDB"
+#
+#
+# ## Optional parameter, setting this to 2 will use a new version
+# ## of the collection queries that break compatibility with the original
+# ## dashboards.
+# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
+# query_version = 2
+#
+# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
+# # azuredb = false
+#
+# ## Possible queries
+# ## Version 2:
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - DatabaseIO
+# ## - ServerProperties
+# ## - MemoryClerk
+# ## - Schedulers
+# ## - SqlRequests
+# ## - VolumeSpace
+# ## - Cpu
+#
+# ## Version 1:
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - CPUHistory
+# ## - DatabaseIO
+# ## - DatabaseSize
+# ## - DatabaseStats
+# ## - DatabaseProperties
+# ## - MemoryClerk
+# ## - VolumeSpace
+# ## - PerformanceMetrics
+#
+#
+# ## Queries enabled by default for specific Database Type
+# ## database_type = AzureSQLDB
+# ## AzureDBWaitStats, AzureDBResourceStats, AzureDBResourceGovernance, sqlAzureDBDatabaseIO
+#
+# ## A list of queries to include. If not specified, all the above listed queries are used.
+# # include_query = []
+#
+# ## A list of queries to explicitly ignore.
+# exclude_query = [ 'Schedulers' , 'SqlRequests']
+
+
+# # Gather timeseries from Google Cloud Platform v3 monitoring API
+# [[inputs.stackdriver]]
+# ## GCP Project
+# project = "erudite-bloom-151019"
+#
+# ## Include timeseries that start with the given metric type.
+# metric_type_prefix_include = [
+# "compute.googleapis.com/",
+# ]
+#
+# ## Exclude timeseries that start with the given metric type.
+# # metric_type_prefix_exclude = []
+#
+# ## Many metrics are updated once per minute; it is recommended to override
+# ## the agent level interval with a value of 1m or greater.
+# interval = "1m"
+#
+# ## Maximum number of API calls to make per second. The quota for accounts
+# ## varies, it can be viewed on the API dashboard:
+# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits
+# # rate_limit = 14
+#
+# ## The delay and window options control the number of points selected on
+# ## each gather. When set, metrics are gathered between:
+# ## start: now() - delay - window
+# ## end: now() - delay
+# #
+# ## Collection delay; if set too low metrics may not yet be available.
+# # delay = "5m"
+# #
+# ## If unset, the window will start at 1m and be updated dynamically to span
+# ## the time between calls (approximately the length of the plugin interval).
+# # window = "1m"
+#
+# ## TTL for cached list of metric types. This is the maximum amount of time
+# ## it may take to discover new metrics.
+# # cache_ttl = "1h"
+#
+# ## If true, raw bucket counts are collected for distribution value types.
+# ## For a more lightweight collection, you may wish to disable and use
+# ## distribution_aggregation_aligners instead.
+# # gather_raw_distribution_buckets = true
+#
+# ## Aggregate functions to be used for metrics whose value type is
+# ## distribution. These aggregate values are recorded in addition to raw
+# ## bucket counts if they are enabled.
+# ##
+# ## For a list of aligner strings see:
+# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
+# # distribution_aggregation_aligners = [
+# # "ALIGN_PERCENTILE_99",
+# # "ALIGN_PERCENTILE_95",
+# # "ALIGN_PERCENTILE_50",
+# # ]
+#
+# ## Filters can be added to reduce the number of time series matched. All
+# ## functions are supported: starts_with, ends_with, has_substring, and
+# ## one_of. Only the '=' operator is supported.
+# ##
+# ## The logical operators when combining filters are defined statically using
+# ## the following values:
+# ## filter ::= {AND }
+# ## resource_labels ::= {OR }
+# ## metric_labels ::= {OR }
+# ##
+# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
+# #
+# ## Resource labels refine the time series selection with the following expression:
+# ## resource.labels. =
+# # [[inputs.stackdriver.filter.resource_labels]]
+# # key = "instance_name"
+# # value = 'starts_with("localhost")'
+# #
+# ## Metric labels refine the time series selection with the following expression:
+# ## metric.labels. =
+# # [[inputs.stackdriver.filter.metric_labels]]
+# # key = "device_name"
+# # value = 'one_of("sda", "sdb")'
+
+
+# # Get synproxy counter statistics from procfs
+# [[inputs.synproxy]]
+# # no configuration
+
+
+# # Sysstat metrics collector
+# [[inputs.sysstat]]
+# ## Path to the sadc command.
+# #
+# ## Common Defaults:
+# ## Debian/Ubuntu: /usr/lib/sysstat/sadc
+# ## Arch: /usr/lib/sa/sadc
+# ## RHEL/CentOS: /usr/lib64/sa/sadc
+# sadc_path = "/usr/lib/sa/sadc" # required
+#
+# ## Path to the sadf command, if it is not in PATH
+# # sadf_path = "/usr/bin/sadf"
+#
+# ## Activities is a list of activities, that are passed as argument to the
+# ## sadc collector utility (e.g: DISK, SNMP etc...)
+# ## The more activities that are added, the more data is collected.
+# # activities = ["DISK"]
+#
+# ## Group metrics to measurements.
+# ##
+# ## If group is false each metric will be prefixed with a description
+# ## and represents itself a measurement.
+# ##
+# ## If Group is true, corresponding metrics are grouped to a single measurement.
+# # group = true
+#
+# ## Options for the sadf command. The values on the left represent the sadf
+# ## options and the values on the right their description (which are used for
+# ## grouping and prefixing metrics).
+# ##
+# ## Run 'sar -h' or 'man sar' to find out the supported options for your
+# ## sysstat version.
+# [inputs.sysstat.options]
+# -C = "cpu"
+# -B = "paging"
+# -b = "io"
+# -d = "disk" # requires DISK activity
+# "-n ALL" = "network"
+# "-P ALL" = "per_cpu"
+# -q = "queue"
+# -R = "mem"
+# -r = "mem_util"
+# -S = "swap_util"
+# -u = "cpu_util"
+# -v = "inode"
+# -W = "swap"
+# -w = "task"
+# # -H = "hugepages" # only available for newer linux distributions
+# # "-I ALL" = "interrupts" # requires INT activity
+#
+# ## Device tags can be used to add additional tags for devices.
+# ## For example the configuration below adds a tag vg with value rootvg for
+# ## all metrics with sda devices.
+# # [[inputs.sysstat.device_tags.sda]]
+# # vg = "rootvg"
+
+
+# # Gather systemd units state
+# [[inputs.systemd_units]]
+# ## Set timeout for systemctl execution
+# # timeout = "1s"
+# #
+# ## Filter for a specific unit type, default is "service", other possible
+# ## values are "socket", "target", "device", "mount", "automount", "swap",
+# ## "timer", "path", "slice" and "scope ":
+# # unittype = "service"
+
+
+# # Reads metrics from a Teamspeak 3 Server via ServerQuery
+# [[inputs.teamspeak]]
+# ## Server address for Teamspeak 3 ServerQuery
+# # server = "127.0.0.1:10011"
+# ## Username for ServerQuery
+# username = "serverqueryuser"
+# ## Password for ServerQuery
+# password = "secret"
+# ## Array of virtual servers
+# # virtual_servers = [1]
+
+
+# # Read metrics about temperature
+# [[inputs.temp]]
+# # no configuration
+
+
+# # Read Tengine's basic status information (ngx_http_reqstat_module)
+# [[inputs.tengine]]
+# # An array of Tengine reqstat module URI to gather stats.
+# urls = ["http://127.0.0.1/us"]
+#
+# # HTTP response timeout (default: 5s)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.cer"
+# # tls_key = "/etc/telegraf/key.key"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Gather metrics from the Tomcat server status page.
+# [[inputs.tomcat]]
+# ## URL of the Tomcat server status
+# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
+#
+# ## HTTP Basic Auth Credentials
+# # username = "tomcat"
+# # password = "s3cret"
+#
+# ## Request timeout
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Inserts sine and cosine waves for demonstration purposes
+# [[inputs.trig]]
+# ## Set the amplitude
+# amplitude = 10.0
+
+
+# # Read Twemproxy stats data
+# [[inputs.twemproxy]]
+# ## Twemproxy stats address and port (no scheme)
+# addr = "localhost:22222"
+# ## Monitor pool name
+# pools = ["redis_pool", "mc_pool"]
+
+
+# # A plugin to collect stats from the Unbound DNS resolver
+# [[inputs.unbound]]
+# ## Address of server to connect to, read from unbound conf default, optionally ':port'
+# ## Will lookup IP if given a hostname
+# server = "127.0.0.1:8953"
+#
+# ## If running as a restricted user you can prepend sudo for additional access:
+# # use_sudo = false
+#
+# ## The default location of the unbound-control binary can be overridden with:
+# # binary = "/usr/sbin/unbound-control"
+#
+# ## The default location of the unbound config file can be overridden with:
+# # config_file = "/etc/unbound/unbound.conf"
+#
+# ## The default timeout of 1s can be overridden with:
+# # timeout = "1s"
+#
+# ## When set to true, thread metrics are tagged with the thread id.
+# ##
+# ## The default is false for backwards compatibility, and will be changed to
+# ## true in a future version. It is recommended to set to true on new
+# ## deployments.
+# thread_as_tag = false
+
+
+# # Read uWSGI metrics.
+# [[inputs.uwsgi]]
+# ## List with urls of uWSGI Stats servers. URL must match pattern:
+# ## scheme://address[:port]
+# ##
+# ## For example:
+# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
+# servers = ["tcp://127.0.0.1:1717"]
+#
+# ## General connection timeout
+# # timeout = "5s"
+
+
+# # A plugin to collect stats from Varnish HTTP Cache
+# [[inputs.varnish]]
+# ## If running as a restricted user you can prepend sudo for additional access:
+# #use_sudo = false
+#
+# ## The default location of the varnishstat binary can be overridden with:
+# binary = "/usr/bin/varnishstat"
+#
+# ## By default, telegraf gather stats for 3 metric points.
+# ## Setting stats will override the defaults shown below.
+# ## Glob matching can be used, ie, stats = ["MAIN.*"]
+# ## stats may also be set to ["*"], which will collect all stats
+# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
+#
+# ## Optional name for the varnish instance (or working directory) to query
+# ## Usually append after -n in varnish cli
+# # instance_name = instanceName
+#
+# ## Timeout for varnishstat command
+# # timeout = "1s"
+
+
+# # Collect Wireguard server interface and peer statistics
+# [[inputs.wireguard]]
+# ## Optional list of Wireguard device/interface names to query.
+# ## If omitted, all Wireguard interfaces are queried.
+# # devices = ["wg0"]
+
+
+# # Monitor wifi signal strength and quality
+# [[inputs.wireless]]
+# ## Sets 'proc' directory path
+# ## If not specified, then default is /proc
+# # host_proc = "/proc"
+
+
+# # Reads metrics from a SSL certificate
+# [[inputs.x509_cert]]
+# ## List certificate sources
+# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"]
+#
+# ## Timeout for SSL connection
+# # timeout = "5s"
+#
+# ## Pass a different name into the TLS request (Server Name Indication)
+# ## example: server_name = "myhost.example.org"
+# # server_name = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+
+
+# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
+# [[inputs.zfs]]
+# ## ZFS kstat path. Ignored on FreeBSD
+# ## If not specified, then default is:
+# # kstatPath = "/proc/spl/kstat/zfs"
+#
+# ## By default, telegraf gather all zfs stats
+# ## If not specified, then default is:
+# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
+# ## For Linux, the default is:
+# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
+# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
+# ## By default, don't gather zpool stats
+# # poolMetrics = false
+
+
+# # Reads 'mntr' stats from one or many zookeeper servers
+# [[inputs.zookeeper]]
+# ## An array of address to gather stats about. Specify an ip or hostname
+# ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
+#
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no port is specified, 2181 is used
+# servers = [":2181"]
+#
+# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## If false, skip chain & host verification
+# # insecure_skip_verify = true
+
+
+###############################################################################
+# SERVICE INPUT PLUGINS #
+###############################################################################
+
+
+# # AMQP consumer plugin
+# [[inputs.amqp_consumer]]
+# ## Broker to consume from.
+# ## deprecated in 1.7; use the brokers option
+# # url = "amqp://localhost:5672/influxdb"
+#
+# ## Brokers to consume from. If multiple brokers are specified a random broker
+# ## will be selected anytime a connection is established. This can be
+# ## helpful for load balancing when not using a dedicated load balancer.
+# brokers = ["amqp://localhost:5672/influxdb"]
+#
+# ## Authentication credentials for the PLAIN auth_method.
+# # username = ""
+# # password = ""
+#
+# ## Name of the exchange to declare. If unset, no exchange will be declared.
+# exchange = "telegraf"
+#
+# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
+# # exchange_type = "topic"
+#
+# ## If true, exchange will be passively declared.
+# # exchange_passive = false
+#
+# ## Exchange durability can be either "transient" or "durable".
+# # exchange_durability = "durable"
+#
+# ## Additional exchange arguments.
+# # exchange_arguments = { }
+# # exchange_arguments = {"hash_property" = "timestamp"}
+#
+# ## AMQP queue name.
+# queue = "telegraf"
+#
+# ## AMQP queue durability can be "transient" or "durable".
+# queue_durability = "durable"
+#
+# ## If true, queue will be passively declared.
+# # queue_passive = false
+#
+# ## A binding between the exchange and queue using this binding key is
+# ## created. If unset, no binding is created.
+# binding_key = "#"
+#
+# ## Maximum number of messages server should give to the worker.
+# # prefetch_count = 50
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Auth method. PLAIN and EXTERNAL are supported
+# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+# ## described here: https://www.rabbitmq.com/plugins.html
+# # auth_method = "PLAIN"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Content encoding for message payloads, can be set to "gzip" or
+# ## "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read Cassandra metrics through Jolokia
+# [[inputs.cassandra]]
+# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
+# ## jolokia2 plugin instead.
+# ##
+# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
+#
+# context = "/jolokia/read"
+# ## List of cassandra servers exposing jolokia read service
+# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
+# ## List of metrics collected on above servers
+# ## Each metric consists of a jmx path.
+# ## This will collect all heap memory usage metrics from the jvm and
+# ## ReadLatency metrics for all keyspaces and tables.
+# ## "type=Table" in the query works with Cassandra3.0. Older versions might
+# ## need to use "type=ColumnFamily"
+# metrics = [
+# "/java.lang:type=Memory/HeapMemoryUsage",
+# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
+# ]
+
+
+# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms
+# [[inputs.cisco_telemetry_mdt]]
+# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
+# ## using the grpc transport.
+# transport = "grpc"
+#
+# ## Address and port to host telemetry listener
+# service_address = ":57000"
+#
+# ## Enable TLS; grpc transport only.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Enable TLS client authentication and define allowed CA certificates; grpc
+# ## transport only.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
+# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
+#
+# ## Define aliases to map telemetry encoding paths to simple measurement names
+# [inputs.cisco_telemetry_mdt.aliases]
+# ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+
+
+# # Read metrics from one or many ClickHouse servers
+# [[inputs.clickhouse]]
+# ## Username for authorization on ClickHouse server
+# ## example: username = "default"
+# username = "default"
+#
+# ## Password for authorization on ClickHouse server
+# ## example: password = "super_secret"
+#
+# ## HTTP(s) timeout while getting metrics values
+# ## The timeout includes connection time, any redirects, and reading the response body.
+# ## example: timeout = 1s
+# # timeout = 5s
+#
+# ## List of servers for metrics scraping
+# ## metrics scrape via HTTP(s) clickhouse interface
+# ## https://clickhouse.tech/docs/en/interfaces/http/
+# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+# servers = ["http://127.0.0.1:8123"]
+#
+# ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster
+# ## with using same "user:password" described in "user" and "password" parameters
+# ## and get this server hostname list from "system.clusters" table
+# ## see
+# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+# ## example: auto_discovery = false
+# # auto_discovery = true
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter present then "WHERE cluster IN (...)" filter will apply
+# ## please use only full cluster names here, regexp and glob filters is not allowed
+# ## for "/etc/clickhouse-server/config.d/remote.xml"
+# ##
+# ##
+# ##
+# ##
+# ## clickhouse-ru-1.local9000
+# ## clickhouse-ru-2.local9000
+# ##
+# ##
+# ## clickhouse-eu-1.local9000
+# ## clickhouse-eu-2.local9000
+# ##
+# ##
+# ##
+# ##
+# ##
+# ##
+# ## example: cluster_include = ["my-own-cluster"]
+# # cluster_include = []
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply
+# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+# # cluster_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from Google PubSub
+# [[inputs.cloud_pubsub]]
+# ## Required. Name of Google Cloud Platform (GCP) Project that owns
+# ## the given PubSub subscription.
+# project = "my-project"
+#
+# ## Required. Name of PubSub subscription to ingest metrics from.
+# subscription = "my-subscription"
+#
+# ## Required. Data format to consume.
+# ## Each data format has its own unique set of configuration options.
+# ## Read more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+# ## Application Default Credentials, which is preferred.
+# # credentials_file = "path/to/my/creds.json"
+#
+# ## Optional. Number of seconds to wait before attempting to restart the
+# ## PubSub subscription receiver after an unexpected error.
+# ## If the streaming pull for a PubSub Subscription fails (receiver),
+# ## the agent attempts to restart receiving messages after this many seconds.
+# # retry_delay_seconds = 5
+#
+# ## Optional. Maximum byte length of a message to consume.
+# ## Larger messages are dropped with an error. If less than 0 or unspecified,
+# ## treated as no limit.
+# # max_message_len = 1000000
+#
+# ## Optional. Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## The following are optional Subscription ReceiveSettings in PubSub.
+# ## Read more about these values:
+# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
+#
+# ## Optional. Maximum number of seconds for which a PubSub subscription
+# ## should auto-extend the PubSub ACK deadline for each message. If less than
+# ## 0, auto-extension is disabled.
+# # max_extension = 0
+#
+# ## Optional. Maximum number of unprocessed messages in PubSub
+# ## (unacknowledged but not yet expired in PubSub).
+# ## A value of 0 is treated as the default PubSub value.
+# ## Negative values will be treated as unlimited.
+# # max_outstanding_messages = 0
+#
+# ## Optional. Maximum size in bytes of unprocessed messages in PubSub
+# ## (unacknowledged but not yet expired in PubSub).
+# ## A value of 0 is treated as the default PubSub value.
+# ## Negative values will be treated as unlimited.
+# # max_outstanding_bytes = 0
+#
+# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
+# ## to pull messages from PubSub concurrently. This limit applies to each
+# ## subscription separately and is treated as the PubSub default if less than
+# ## 1. Note this setting does not limit the number of messages that can be
+# ## processed concurrently (use "max_outstanding_messages" instead).
+# # max_receiver_go_routines = 0
+#
+# ## Optional. If true, Telegraf will attempt to base64 decode the
+# ## PubSub message data before parsing
+# # base64_data = false
+
+
+# # Google Cloud Pub/Sub Push HTTP listener
+# [[inputs.cloud_pubsub_push]]
+# ## Address and port to host HTTP listener on
+# service_address = ":8080"
+#
+# ## Application secret to verify messages originate from Cloud Pub/Sub
+# # token = ""
+#
+# ## Path to listen to.
+# # path = "/"
+#
+# ## Maximum duration before timing out read of the request
+# # read_timeout = "10s"
+# ## Maximum duration before timing out write of the response. This should be set to a value
+# ## large enough that you can send at least 'metric_batch_size' number of messages within the
+# ## duration.
+# # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# # max_body_size = "500MB"
+#
+# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
+# # add_meta = false
+#
+# ## Optional. Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read logging output from the Docker engine
+# [[inputs.docker_log]]
+# ## Docker Endpoint
+# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
+# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
+# # endpoint = "unix:///var/run/docker.sock"
+#
+# ## When true, container logs are read from the beginning; otherwise
+# ## reading begins at the end of the log.
+# # from_beginning = false
+#
+# ## Timeout for Docker API calls.
+# # timeout = "5s"
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# # container_name_include = []
+# # container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "running" state will be captured.
+# # container_state_include = []
+# # container_state_exclude = []
+#
+# ## docker labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# # docker_label_include = []
+# # docker_label_exclude = []
+#
+# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+# source_tag = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Azure Event Hubs service input plugin
+# [[inputs.eventhub_consumer]]
+# ## The default behavior is to create a new Event Hub client from environment variables.
+# ## This requires one of the following sets of environment variables to be set:
+# ##
+# ## 1) Expected Environment Variables:
+# ## - "EVENTHUB_NAMESPACE"
+# ## - "EVENTHUB_NAME"
+# ## - "EVENTHUB_CONNECTION_STRING"
+# ##
+# ## 2) Expected Environment Variables:
+# ## - "EVENTHUB_NAMESPACE"
+# ## - "EVENTHUB_NAME"
+# ## - "EVENTHUB_KEY_NAME"
+# ## - "EVENTHUB_KEY_VALUE"
+#
+# ## Uncommenting the option below will create an Event Hub client based solely on the connection string.
+# ## This can either be the associated environment variable or hard coded directly.
+# # connection_string = ""
+#
+# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
+# # persistence_dir = ""
+#
+# ## Change the default consumer group
+# # consumer_group = ""
+#
+# ## By default the event hub receives all messages present on the broker, alternative modes can be set below.
+# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339).
+# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run).
+# # from_timestamp =
+# # latest = true
+#
+# ## Set a custom prefetch count for the receiver(s)
+# # prefetch_count = 1000
+#
+# ## Add an epoch to the receiver(s)
+# # epoch = 0
+#
+# ## Change to set a custom user agent, "telegraf" is used by default
+# # user_agent = "telegraf"
+#
+# ## To consume from a specific partition, set the partition_ids option.
+# ## An empty array will result in receiving from all partitions.
+# # partition_ids = ["0","1"]
+#
+# ## Max undelivered messages
+# # max_undelivered_messages = 1000
+#
+# ## Set either option below to true to use a system property as timestamp.
+# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime.
+# ## It is recommended to use this setting when the data itself has no timestamp.
+# # enqueued_time_as_ts = true
+# # iot_hub_enqueued_time_as_ts = true
+#
+# ## Tags or fields to create from keys present in the application property bag.
+# ## These could for example be set by message enrichments in Azure IoT Hub.
+# # application_property_tags = []
+# # application_property_fields = []
+#
+# ## Tag or field name to use for metadata
+# ## By default all metadata is disabled
+# # sequence_number_field = "SequenceNumber"
+# # enqueued_time_field = "EnqueuedTime"
+# # offset_field = "Offset"
+# # partition_id_tag = "PartitionID"
+# # partition_key_tag = "PartitionKey"
+# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID"
+# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID"
+# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod"
+# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID"
+# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Run executable as long-running input plugin
+# [[inputs.execd]]
+# ## Program to run as daemon
+# command = ["telegraf-smartctl", "-d", "/dev/sda"]
+#
+# ## Define how the process is signaled on each collection interval.
+# ## Valid values are:
+# ## "none" : Do not signal anything.
+# ## The process must output metrics by itself.
+# ## "STDIN" : Send a newline on STDIN.
+# ## "SIGHUP" : Send a HUP signal. Not available on Windows.
+# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows.
+# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows.
+# signal = "none"
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # gNMI telemetry input plugin
+# [[inputs.gnmi]]
+# ## Address and port of the gNMI GRPC server
+# addresses = ["10.49.234.114:57777"]
+#
+# ## define credentials
+# username = "cisco"
+# password = "cisco"
+#
+# ## gNMI encoding requested (one of: "proto", "json", "json_ietf")
+# # encoding = "proto"
+#
+# ## redial in case of failures after
+# redial = "10s"
+#
+# ## enable client-side TLS and define CA to authenticate the device
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # insecure_skip_verify = true
+#
+# ## define client-side TLS certificate & key to authenticate to the device
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## gNMI subscription prefix (optional, can usually be left empty)
+# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+# # origin = ""
+# # prefix = ""
+# # target = ""
+#
+# ## Define additional aliases to map telemetry encoding paths to simple measurement names
+# #[inputs.gnmi.aliases]
+# # ifcounters = "openconfig:/interfaces/interface/state/counters"
+#
+# [[inputs.gnmi.subscription]]
+# ## Name of the measurement that will be emitted
+# name = "ifcounters"
+#
+# ## Origin and path of the subscription
+# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+# ##
+# ## origin usually refers to a (YANG) data model implemented by the device
+# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
+# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr
+# origin = "openconfig-interfaces"
+# path = "/interfaces/interface/state/counters"
+#
+# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval
+# subscription_mode = "sample"
+# sample_interval = "10s"
+#
+# ## Suppress redundant transmissions when measured values are unchanged
+# # suppress_redundant = false
+#
+# ## If suppression is enabled, send updates at least every X seconds anyway
+# # heartbeat_interval = "60s"
+
+
+# # Accept metrics over InfluxDB 1.x HTTP API
+# [[inputs.http_listener]]
+# ## Address and port to host InfluxDB listener on
+# service_address = ":8186"
+#
+# ## maximum duration before timing out read of the request
+# read_timeout = "10s"
+# ## maximum duration before timing out write of the response
+# write_timeout = "10s"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# max_body_size = "32MiB"
+#
+# ## Optional tag name used to store the database.
+# ## If the write has a database in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# # database_tag = ""
+#
+# ## If set the retention policy specified in the write query will be added as
+# ## the value of this tag name.
+# # retention_policy_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# tls_cert = "/etc/telegraf/cert.pem"
+# tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional username and password to accept for HTTP basic authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+
+
+# # Generic HTTP write listener
+# [[inputs.http_listener_v2]]
+# ## Address and port to host HTTP listener on
+# service_address = ":8080"
+#
+# ## Path to listen to.
+# # path = "/telegraf"
+#
+# ## HTTP methods to accept.
+# # methods = ["POST", "PUT"]
+#
+# ## maximum duration before timing out read of the request
+# # read_timeout = "10s"
+# ## maximum duration before timing out write of the response
+# # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# # max_body_size = "500MB"
+#
+# ## Part of the request to consume. Available options are "body" and
+# ## "query".
+# # data_source = "body"
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional username and password to accept for HTTP basic authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+#
+# ## Optional setting to map http headers into tags
+# ## If the http header is not present on the request, no corresponding tag will be added
+# ## If multiple instances of the http header are present, only the first value will be used
+# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Accept metrics over InfluxDB 1.x HTTP API
+# [[inputs.influxdb_listener]]
+# ## Address and port to host InfluxDB listener on
+# service_address = ":8186"
+#
+# ## maximum duration before timing out read of the request
+# read_timeout = "10s"
+# ## maximum duration before timing out write of the response
+# write_timeout = "10s"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# max_body_size = "32MiB"
+#
+# ## Optional tag name used to store the database.
+# ## If the write has a database in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# # database_tag = ""
+#
+# ## If set the retention policy specified in the write query will be added as
+# ## the value of this tag name.
+# # retention_policy_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# tls_cert = "/etc/telegraf/cert.pem"
+# tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional username and password to accept for HTTP basic authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+
+
+# # Accept metrics over InfluxDB 2.x HTTP API
+# [[inputs.influxdb_v2_listener]]
+# ## Address and port to host InfluxDB listener on
+# ## (Double check the port. Could be 9999 if using OSS Beta)
+# service_address = ":8086"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# # max_body_size = "32MiB"
+#
+# ## Optional tag to determine the bucket.
+# ## If the write has a bucket in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# # bucket_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional token to accept for HTTP authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # token = "some-long-shared-secret-token"
+
+
+# # Intel Resource Director Technology plugin
+# [[inputs.intel_rdt]]
+# ## Optionally set sampling interval to Nx100ms.
+# ## This value is propagated to pqos tool. Interval format is defined by pqos itself.
+# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s.
+# # sampling_interval = "10"
+#
+# ## Optionally specify the path to pqos executable.
+# ## If not provided, auto discovery will be performed.
+# # pqos_path = "/usr/local/bin/pqos"
+#
+# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated.
+# ## If not provided, default value is false.
+# # shortened_metrics = false
+#
+# ## Specify the list of groups of CPU core(s) to be provided as pqos input.
+# ## Mandatory if processes aren't set and forbidden if processes are specified.
+# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"]
+# # cores = ["0-3"]
+#
+# ## Specify the list of processes for which Metrics will be collected.
+# ## Mandatory if cores aren't set and forbidden if cores are specified.
+# ## e.g. ["qemu", "pmd"]
+# # processes = ["process"]
+
+
+# # Read JTI OpenConfig Telemetry from listed sensors
+# [[inputs.jti_openconfig_telemetry]]
+# ## List of device addresses to collect telemetry from
+# servers = ["localhost:1883"]
+#
+# ## Authentication details. Username and password are must if device expects
+# ## authentication. Client ID must be unique when connecting from multiple instances
+# ## of telegraf to the same device
+# username = "user"
+# password = "pass"
+# client_id = "telegraf"
+#
+# ## Frequency to get data
+# sample_frequency = "1000ms"
+#
+# ## Sensors to subscribe for
+# ## An identifier for each sensor can be provided in path by separating with space
+# ## Else sensor path will be used as identifier
+# ## When identifier is used, we can provide a list of space separated sensors.
+# ## A single subscription will be created with all these sensors and data will
+# ## be saved to measurement with this identifier name
+# sensors = [
+# "/interfaces/",
+# "collection /components/ /lldp",
+# ]
+#
+# ## We allow specifying sensor group level reporting rate. To do this, specify the
+# ## reporting rate in Duration at the beginning of sensor paths / collection
+# ## name. For entries without reporting rate, we use configured sample frequency
+# sensors = [
+# "1000ms customReporting /interfaces /lldp",
+# "2000ms collection /components",
+# "/interfaces",
+# ]
+#
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
+# ## Failed streams/calls will not be retried if 0 is provided
+# retry_delay = "1000ms"
+#
+# ## To treat all string values as tags, set this to true
+# str_as_tags = false
+
+
+# # Read metrics from Kafka topics
+# [[inputs.kafka_consumer]]
+# ## Kafka brokers.
+# brokers = ["localhost:9092"]
+#
+# ## Topics to consume.
+# topics = ["telegraf"]
+#
+# ## When set this tag will be added to all metrics with the topic as the value.
+# # topic_tag = ""
+#
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Must be 0.10.2.0 or greater.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## SASL authentication credentials. These settings should typically be used
+# ## with TLS encryption enabled using the "enable_tls" option.
+# # sasl_username = "kafka"
+# # sasl_password = "secret"
+#
+# ## SASL protocol version. When connecting to Azure EventHub set to 0.
+# # sasl_version = 1
+#
+# ## Name of the consumer group.
+# # consumer_group = "telegraf_metrics_consumers"
+#
+# ## Initial offset position; one of "oldest" or "newest".
+# # offset = "oldest"
+#
+# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
+# # balance_strategy = "range"
+#
+# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
+# ## larger messages are dropped
+# max_message_len = 1000000
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read metrics from Kafka topic(s)
+# [[inputs.kafka_consumer_legacy]]
+# ## topic(s) to consume
+# topics = ["telegraf"]
+#
+# ## an array of Zookeeper connection strings
+# zookeeper_peers = ["localhost:2181"]
+#
+# ## Zookeeper Chroot
+# zookeeper_chroot = ""
+#
+# ## the name of the consumer group
+# consumer_group = "telegraf_metrics_consumers"
+#
+# ## Offset (must be either "oldest" or "newest")
+# offset = "oldest"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
+# ## larger messages are dropped
+# max_message_len = 65536
+
+
+# # Configuration for the AWS Kinesis input.
+# [[inputs.kinesis_consumer]]
+# ## Amazon REGION of kinesis endpoint.
+# region = "ap-southeast-2"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Assumed credentials via STS if role_arn is specified
+# ## 2) explicit credentials from 'access_key' and 'secret_key'
+# ## 3) shared profile from 'profile'
+# ## 4) environment variables
+# ## 5) shared credentials file
+# ## 6) EC2 Instance Profile
+# # access_key = ""
+# # secret_key = ""
+# # token = ""
+# # role_arn = ""
+# # profile = ""
+# # shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Kinesis StreamName must exist prior to starting telegraf.
+# streamname = "StreamName"
+#
+# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported)
+# # shard_iterator_type = "TRIM_HORIZON"
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional
+# ## Configuration for a dynamodb checkpoint
+# [inputs.kinesis_consumer.checkpoint_dynamodb]
+# ## unique name for this consumer
+# app_name = "default"
+# table_name = "default"
+
+
+# # Read metrics off Arista LANZ, via socket
+# [[inputs.lanz]]
+# ## URL to Arista LANZ endpoint
+# servers = [
+# "tcp://127.0.0.1:50001"
+# ]
+
+
+# # Stream and parse log file(s).
+# [[inputs.logparser]]
+# ## Log files to parse.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/**.log -> recursively find all .log files in /var/log
+# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
+# ## /var/log/apache.log -> only tail the apache log file
+# files = ["/var/log/apache/access.log"]
+#
+# ## Read files that currently exist from the beginning. Files that are created
+# ## while telegraf is running (and that match the "files" globs) will always
+# ## be read from the beginning.
+# from_beginning = false
+#
+# ## Method used to watch for file updates. Can be either "inotify" or "poll".
+# # watch_method = "inotify"
+#
+# ## Parse logstash-style "grok" patterns:
+# [inputs.logparser.grok]
+# ## This is a list of patterns to check the given log file(s) for.
+# ## Note that adding patterns here increases processing time. The most
+# ## efficient configuration is to have one pattern per logparser.
+# ## Other common built-in patterns are:
+# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
+# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
+# patterns = ["%{COMBINED_LOG_FORMAT}"]
+#
+# ## Name of the outputted measurement name.
+# measurement = "apache_access_log"
+#
+# ## Full path(s) to custom pattern files.
+# custom_pattern_files = []
+#
+# ## Custom patterns can also be defined here. Put one pattern per line.
+# custom_patterns = '''
+# '''
+#
+# ## Timezone allows you to provide an override for timestamps that
+# ## don't already include an offset
+# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
+# ##
+# ## Default: "" which renders UTC
+# ## Options are as follows:
+# ## 1. Local -- interpret based on machine localtime
+# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
+# # timezone = "Canada/Eastern"
+#
+# ## When set to "disable", timestamp will not be incremented if there is a
+# ## duplicate.
+# # unique_timestamp = "auto"
+
+
+# # Read metrics from MQTT topic(s)
+# [[inputs.mqtt_consumer]]
+# ## Broker URLs for the MQTT server or cluster. To connect to multiple
+# ## clusters or standalone servers, use a separate plugin instance.
+# ## example: servers = ["tcp://localhost:1883"]
+# ## servers = ["ssl://localhost:1883"]
+# ## servers = ["ws://localhost:1883"]
+# servers = ["tcp://127.0.0.1:1883"]
+#
+# ## Topics that will be subscribed to.
+# topics = [
+# "telegraf/host01/cpu",
+# "telegraf/+/mem",
+# "sensors/#",
+# ]
+#
+# ## The message topic will be stored in a tag specified by this value. If set
+# ## to the empty string no topic tag will be created.
+# # topic_tag = "topic"
+#
+# ## QoS policy for messages
+# ## 0 = at most once
+# ## 1 = at least once
+# ## 2 = exactly once
+# ##
+# ## When using a QoS of 1 or 2, you should enable persistent_session to allow
+# ## resuming unacknowledged messages.
+# # qos = 0
+#
+# ## Connection timeout for initial connection in seconds
+# # connection_timeout = "30s"
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Persistent session disables clearing of the client session on connection.
+# ## In order for this option to work you must also set client_id to identify
+# ## the client. To receive messages that arrived while the client is offline,
+# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
+# ## publishing.
+# # persistent_session = false
+#
+# ## If unset, a random client ID will be generated.
+# # client_id = ""
+#
+# ## Username and password to connect MQTT server.
+# # username = "telegraf"
+# # password = "metricsmetricsmetricsmetrics"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read metrics from NATS subject(s)
+# [[inputs.nats_consumer]]
+# ## urls of NATS servers
+# servers = ["nats://localhost:4222"]
+#
+# ## subject(s) to consume
+# subjects = ["telegraf"]
+#
+# ## name a queue group
+# queue_group = "telegraf_consumers"
+#
+# ## Optional credentials
+# # username = ""
+# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
+# ## Use Transport Layer Security
+# # secure = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Sets the limits for pending msgs and bytes for each subscription
+# ## These shouldn't need to be adjusted except in very high throughput scenarios
+# # pending_message_limit = 65536
+# # pending_bytes_limit = 67108864
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read NSQ topic for metrics.
+# [[inputs.nsq_consumer]]
+# ## Server option still works but is deprecated, we just prepend it to the nsqd array.
+# # server = "localhost:4150"
+#
+# ## An array representing the NSQD TCP HTTP Endpoints
+# nsqd = ["localhost:4150"]
+#
+# ## An array representing the NSQLookupd HTTP Endpoints
+# nsqlookupd = ["localhost:4161"]
+# topic = "telegraf"
+# channel = "consumer"
+# max_in_flight = 100
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read metrics from one or many pgbouncer servers
+# [[inputs.pgbouncer]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# ##
+# ## All connection parameters are optional.
+# ##
+# address = "host=localhost user=pgbouncer sslmode=disable"
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# ##
+# ## All connection parameters are optional.
+# ##
+# ## Without the dbname parameter, the driver will default to a database
+# ## with the same name as the user. This dbname is just for instantiating a
+# ## connection with the server and doesn't restrict the databases we are trying
+# ## to grab metrics for.
+# ##
+# address = "host=localhost user=postgres sslmode=disable"
+# ## A custom name for the database that will be used as the "server" tag in the
+# ## measurement output. If not specified, a default one generated from
+# ## the connection address is used.
+# # outputaddress = "db01"
+#
+# ## connection configuration.
+# ## maxlifetime - specify the maximum lifetime of a connection.
+# ## default is forever (0s)
+# max_lifetime = "0s"
+#
+# ## A list of databases to explicitly ignore. If not specified, metrics for all
+# ## databases are gathered. Do NOT use with the 'databases' option.
+# # ignored_databases = ["postgres", "template0", "template1"]
+#
+# ## A list of databases to pull metrics about. If not specified, metrics for all
+# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
+# # databases = ["app_production", "testing"]
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql_extensible]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# #
+# ## All connection parameters are optional. #
+# ## Without the dbname parameter, the driver will default to a database
+# ## with the same name as the user. This dbname is just for instantiating a
+# ## connection with the server and doesn't restrict the databases we are trying
+# ## to grab metrics for.
+# #
+# address = "host=localhost user=postgres sslmode=disable"
+#
+# ## connection configuration.
+# ## maxlifetime - specify the maximum lifetime of a connection.
+# ## default is forever (0s)
+# max_lifetime = "0s"
+#
+# ## A list of databases to pull metrics about. If not specified, metrics for all
+# ## databases are gathered.
+# ## databases = ["app_production", "testing"]
+# #
+# ## A custom name for the database that will be used as the "server" tag in the
+# ## measurement output. If not specified, a default one generated from
+# ## the connection address is used.
+# # outputaddress = "db01"
+# #
+# ## Define the toml config where the sql queries are stored
+# ## New queries can be added, if the withdbname is set to true and there is no
+# ## databases defined in the 'databases field', the sql query is ended by a
+# ## 'is not null' in order to make the query succeed.
+# ## Example :
+# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" becomes
+# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
+# ## because the databases variable was set to ['postgres', 'pgbench' ] and the
+# ## withdbname was true. Be careful that if the withdbname is set to false you
+# ## don't have to define the where clause (aka with the dbname) the tagvalue
+# ## field is used to define custom tags (separated by commas)
+# ## The optional "measurement" value can be used to override the default
+# ## output measurement name ("postgresql").
+# ##
+# ## The script option can be used to specify the .sql file path.
+# ## If script and sqlquery options specified at same time, sqlquery will be used
+# ##
+# ## Structure :
+# ## [[inputs.postgresql_extensible.query]]
+# ## sqlquery string
+# ## version string
+# ## withdbname boolean
+# ## tagvalue string (comma separated)
+# ## measurement string
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_database"
+# version=901
+# withdbname=false
+# tagvalue=""
+# measurement=""
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_bgwriter"
+# version=901
+# withdbname=false
+# tagvalue="postgresql.stats"
+
+
+# # Read metrics from one or many prometheus clients
+# [[inputs.prometheus]]
+# ## An array of urls to scrape metrics from.
+# urls = ["http://localhost:9100/metrics"]
+#
+# ## Metric version controls the mapping from Prometheus metrics into
+# ## Telegraf metrics. When using the prometheus_client output, use the same
+# ## value in both plugins to ensure metrics are round-tripped without
+# ## modification.
+# ##
+# ## example: metric_version = 1; deprecated in 1.13
+# ## metric_version = 2; recommended version
+# # metric_version = 1
+#
+# ## Url tag name (tag containing scraped url. optional, default is "url")
+# # url_tag = "scrapeUrl"
+#
+# ## An array of Kubernetes services to scrape metrics from.
+# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
+#
+# ## Kubernetes config file to create client from.
+# # kube_config = "/path/to/kubernetes.config"
+#
+# ## Scrape Kubernetes pods for the following prometheus annotations:
+# ## - prometheus.io/scrape: Enable scraping for this pod
+# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
+# ## set this to 'https' & most likely set the tls config.
+# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
+# ## - prometheus.io/port: If port is not 9102 use this annotation
+# # monitor_kubernetes_pods = true
+# ## Restricts Kubernetes monitoring to a single namespace
+# ## ex: monitor_kubernetes_pods_namespace = "default"
+# # monitor_kubernetes_pods_namespace = ""
+# # label selector to target pods which have the label
+# # kubernetes_label_selector = "env=dev,app=nginx"
+# # field selector to target pods
+# # eg. To scrape pods on a specific node
+# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## HTTP Basic Authentication username and password. ('bearer_token' and
+# ## 'bearer_token_string' take priority)
+# # username = ""
+# # password = ""
+#
+# ## Specify timeout duration for slower prometheus clients (default is 3s)
+# # response_timeout = "3s"
+#
+# ## Optional TLS Config
+# # tls_ca = /path/to/cafile
+# # tls_cert = /path/to/certfile
+# # tls_key = /path/to/keyfile
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # SFlow V5 Protocol Listener
+# [[inputs.sflow]]
+# ## Address to listen for sFlow packets.
+# ## example: service_address = "udp://:6343"
+# ## service_address = "udp4://:6343"
+# ## service_address = "udp6://:6343"
+# service_address = "udp://:6343"
+#
+# ## Set the size of the operating system's receive buffer.
+# ## example: read_buffer_size = "64KiB"
+# # read_buffer_size = ""
+
+
+# # Receive SNMP traps
+# [[inputs.snmp_trap]]
+# ## Transport, local address, and port to listen on. Transport must
+# ## be "udp://". Omit local address to listen on all interfaces.
+# ## example: "udp://127.0.0.1:1234"
+# ##
+# ## Special permissions may be required to listen on a port less than
+# ## 1024. See README.md for details
+# ##
+# # service_address = "udp://:162"
+# ## Timeout running snmptranslate command
+# # timeout = "5s"
+# ## Snmp version, defaults to 2c
+# # version = "2c"
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA" or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+
+
+[[inputs.socket_listener]]
+ service_address = "udp://127.0.0.1:8094"
+ data_format = "influx"
+
+[[inputs.socket_listener]]
+ service_address = "tcp://127.0.0.1:8094"
+ data_format = "influx"
+
+# # Generic socket listener capable of handling multiple socket types.
+# [[inputs.socket_listener]]
+# ## URL to listen on
+# # service_address = "tcp://:8094"
+# # service_address = "tcp://127.0.0.1:http"
+# # service_address = "tcp4://:8094"
+# # service_address = "tcp6://:8094"
+# # service_address = "tcp6://[2001:db8::1]:8094"
+# # service_address = "udp://:8094"
+# # service_address = "udp4://:8094"
+# # service_address = "udp6://:8094"
+# # service_address = "unix:///tmp/telegraf.sock"
+# # service_address = "unixgram:///tmp/telegraf.sock"
+#
+# ## Change the file mode bits on unix sockets. These permissions may not be
+# ## respected by some platforms, to safely restrict write permissions it is best
+# ## to place the socket into a directory that has previously been created
+# ## with the desired permissions.
+# ## ex: socket_mode = "777"
+# # socket_mode = ""
+#
+# ## Maximum number of concurrent connections.
+# ## Only applies to stream sockets (e.g. TCP).
+# ## 0 (default) is unlimited.
+# # max_connections = 1024
+#
+# ## Read timeout.
+# ## Only applies to stream sockets (e.g. TCP).
+# ## 0 (default) is unlimited.
+# # read_timeout = "30s"
+#
+# ## Optional TLS configuration.
+# ## Only applies to stream sockets (e.g. TCP).
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Maximum socket buffer size (in bytes when no unit specified).
+# ## For stream sockets, once the buffer fills up, the sender will start backing up.
+# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
+# ## Defaults to the OS default.
+# # read_buffer_size = "64KiB"
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+#
+# ## Content encoding for message payloads, can be set to "gzip" or
+# ## "identity" to apply no encoding.
+# # content_encoding = "identity"
+
+
+# # Statsd UDP/TCP Server
+[[inputs.statsd]]
+ protocol = "tcp"
+ max_tcp_connections = 250
+ tcp_keep_alive = false
+ # tcp_keep_alive_period = "2h"
+ service_address = "127.0.0.1:8125"
+ delete_gauges = true
+ delete_counters = true
+ delete_sets = true
+ delete_timings = true
+ ## Percentiles to calculate for timing & histogram stats.
+ percentiles = [50.0, 75.0, 95.0, 99.0, 99.95, 100.0]
+ metric_separator = "_"
+ datadog_extensions = true
+ allowed_pending_messages = 10000
+ percentile_limit = 1000
+ # read_buffer_size = 65535
+
+# [[inputs.statsd]]
+# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
+# protocol = "udp"
+#
+# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
+# max_tcp_connections = 250
+#
+# ## Enable TCP keep alive probes (default=false)
+# tcp_keep_alive = false
+#
+# ## Specifies the keep-alive period for an active network connection.
+# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
+# ## Defaults to the OS configuration.
+# # tcp_keep_alive_period = "2h"
+#
+# ## Address and port to host UDP listener on
+# service_address = ":8125"
+#
+# ## The following configuration options control when telegraf clears its cache
+# ## of previous values. If set to false, then telegraf will only clear its
+# ## cache when the daemon is restarted.
+# ## Reset gauges every interval (default=true)
+# delete_gauges = true
+# ## Reset counters every interval (default=true)
+# delete_counters = true
+# ## Reset sets every interval (default=true)
+# delete_sets = true
+# ## Reset timings & histograms every interval (default=true)
+# delete_timings = true
+#
+# ## Percentiles to calculate for timing & histogram stats
+# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
+#
+# ## separator to use between elements of a statsd metric
+# metric_separator = "_"
+#
+# ## Parses tags in the datadog statsd format
+# ## http://docs.datadoghq.com/guides/dogstatsd/
+# parse_data_dog_tags = false
+#
+# ## Parses datadog extensions to the statsd format
+# datadog_extensions = false
+#
+# ## Statsd data translation templates, more info can be read here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
+# # templates = [
+# # "cpu.* measurement*"
+# # ]
+#
+# ## Number of UDP messages allowed to queue up, once filled,
+# ## the statsd server will start dropping packets
+# allowed_pending_messages = 10000
+#
+# ## Number of timing/histogram values to track per-measurement in the
+# ## calculation of percentiles. Raising this limit increases the accuracy
+# ## of percentiles but also increases the memory usage and cpu time.
+# percentile_limit = 1000
+
+
+# # Suricata stats plugin
+# [[inputs.suricata]]
+# ## Data sink for Suricata stats log
+# # This is expected to be a filename of a
+# # unix socket to be created for listening.
+# source = "/var/run/suricata-stats.sock"
+#
+# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
+# # becomes "detect_alert" when delimiter is "_".
+# delimiter = "_"
+
+
+# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
+# [[inputs.syslog]]
+# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
+# ## Protocol, address and port to host the syslog receiver.
+# ## If no host is specified, then localhost is used.
+# ## If no port is specified, 6514 is used (RFC5425#section-4.1).
+# server = "tcp://:6514"
+#
+# ## TLS Config
+# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Period between keep alive probes.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# ## Only applies to stream sockets (e.g. TCP).
+# # keep_alive_period = "5m"
+#
+# ## Maximum number of concurrent connections (default = 0).
+# ## 0 means unlimited.
+# ## Only applies to stream sockets (e.g. TCP).
+# # max_connections = 1024
+#
+# ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
+# ## 0 means unlimited.
+# # read_timeout = "5s"
+#
+# ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
+# ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+# ## or the non-transparent framing technique (RFC6587#section-3.4.2).
+# ## Must be one of "octet-counting", "non-transparent".
+# # framing = "octet-counting"
+#
+# ## The trailer to be expected in case of non-transparent framing (default = "LF").
+# ## Must be one of "LF", or "NUL".
+# # trailer = "LF"
+#
+# ## Whether to parse in best effort mode or not (default = false).
+# ## By default best effort parsing is off.
+# # best_effort = false
+#
+# ## Character to prepend to SD-PARAMs (default = "_").
+# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
+# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
+# ## For each combination a field is created.
+# ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
+# # sdparam_separator = "_"
+
+
+# # Parse the new lines appended to a file
+# [[inputs.tail]]
+# ## File names or a pattern to tail.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## "/var/log/**.log" -> recursively find all .log files in /var/log
+# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+# ## "/var/log/apache.log" -> just tail the apache log file
+# ##
+# ## See https://github.com/gobwas/glob for more examples
+# ##
+# files = ["/var/mymetrics.out"]
+#
+# ## Read file from beginning.
+# # from_beginning = false
+#
+# ## Whether file is a named pipe
+# # pipe = false
+#
+# ## Method used to watch for file updates. Can be either "inotify" or "poll".
+# # watch_method = "inotify"
+#
+# ## Maximum lines of the file to process that have not yet be written by the
+# ## output. For best throughput set based on the number of metrics on each
+# ## line and the size of the output's metric_batch_size.
+# # max_undelivered_lines = 1000
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## multiline parser/codec
+# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
+# #[inputs.tail.multiline]
+# ## The pattern should be a regexp which matches what you believe to be an
+# ## indicator that the field is part of an event consisting of multiple lines of log data.
+# #pattern = "^\s"
+#
+# ## This field must be either "previous" or "next".
+# ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
+# ## whereas "next" indicates that the line belongs to the next one.
+# #match_which_line = "previous"
+#
+# ## The invert_match field can be true or false (defaults to false).
+# ## If true, a message not matching the pattern will constitute a match of the multiline
+# ## filter and the what will be applied. (vice-versa is also true)
+# #invert_match = false
+#
+# ## After the specified timeout, this plugin sends a multiline event even if no new pattern
+# ## is found to start a new event. The default timeout is 5s.
+# #timeout = 5s
+
+
+# # Generic TCP listener
+# [[inputs.tcp_listener]]
+# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# # Generic UDP listener
+# [[inputs.udp_listener]]
+# # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# # Read metrics from VMware vCenter
+# [[inputs.vsphere]]
+# ## List of vCenter URLs to be monitored. These three lines must be uncommented
+# ## and edited for the plugin to work.
+# vcenters = [ "https://vcenter.local/sdk" ]
+# username = "user@corp.local"
+# password = "secret"
+#
+# ## VMs
+# ## Typical VM metrics (if omitted or empty, all metrics are collected)
+# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+# # vm_exclude = [] # Inventory paths to exclude
+# vm_metric_include = [
+# "cpu.demand.average",
+# "cpu.idle.summation",
+# "cpu.latency.average",
+# "cpu.readiness.average",
+# "cpu.ready.summation",
+# "cpu.run.summation",
+# "cpu.usagemhz.average",
+# "cpu.used.summation",
+# "cpu.wait.summation",
+# "mem.active.average",
+# "mem.granted.average",
+# "mem.latency.average",
+# "mem.swapin.average",
+# "mem.swapinRate.average",
+# "mem.swapout.average",
+# "mem.swapoutRate.average",
+# "mem.usage.average",
+# "mem.vmmemctl.average",
+# "net.bytesRx.average",
+# "net.bytesTx.average",
+# "net.droppedRx.summation",
+# "net.droppedTx.summation",
+# "net.usage.average",
+# "power.power.average",
+# "virtualDisk.numberReadAveraged.average",
+# "virtualDisk.numberWriteAveraged.average",
+# "virtualDisk.read.average",
+# "virtualDisk.readOIO.latest",
+# "virtualDisk.throughput.usage.average",
+# "virtualDisk.totalReadLatency.average",
+# "virtualDisk.totalWriteLatency.average",
+# "virtualDisk.write.average",
+# "virtualDisk.writeOIO.latest",
+# "sys.uptime.latest",
+# ]
+# # vm_metric_exclude = [] ## Nothing is excluded by default
+# # vm_instances = true ## true by default
+#
+# ## Hosts
+# ## Typical host metrics (if omitted or empty, all metrics are collected)
+# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
+# # host_exclude = [] # Inventory paths to exclude
+# host_metric_include = [
+# "cpu.coreUtilization.average",
+# "cpu.costop.summation",
+# "cpu.demand.average",
+# "cpu.idle.summation",
+# "cpu.latency.average",
+# "cpu.readiness.average",
+# "cpu.ready.summation",
+# "cpu.swapwait.summation",
+# "cpu.usage.average",
+# "cpu.usagemhz.average",
+# "cpu.used.summation",
+# "cpu.utilization.average",
+# "cpu.wait.summation",
+# "disk.deviceReadLatency.average",
+# "disk.deviceWriteLatency.average",
+# "disk.kernelReadLatency.average",
+# "disk.kernelWriteLatency.average",
+# "disk.numberReadAveraged.average",
+# "disk.numberWriteAveraged.average",
+# "disk.read.average",
+# "disk.totalReadLatency.average",
+# "disk.totalWriteLatency.average",
+# "disk.write.average",
+# "mem.active.average",
+# "mem.latency.average",
+# "mem.state.latest",
+# "mem.swapin.average",
+# "mem.swapinRate.average",
+# "mem.swapout.average",
+# "mem.swapoutRate.average",
+# "mem.totalCapacity.average",
+# "mem.usage.average",
+# "mem.vmmemctl.average",
+# "net.bytesRx.average",
+# "net.bytesTx.average",
+# "net.droppedRx.summation",
+# "net.droppedTx.summation",
+# "net.errorsRx.summation",
+# "net.errorsTx.summation",
+# "net.usage.average",
+# "power.power.average",
+# "storageAdapter.numberReadAveraged.average",
+# "storageAdapter.numberWriteAveraged.average",
+# "storageAdapter.read.average",
+# "storageAdapter.write.average",
+# "sys.uptime.latest",
+# ]
+# ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
+# # ip_addresses = ["ipv6", "ipv4" ]
+#
+# # host_metric_exclude = [] ## Nothing excluded by default
+# # host_instances = true ## true by default
+#
+#
+# ## Clusters
+# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+# # cluster_exclude = [] # Inventory paths to exclude
+# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
+# # cluster_metric_exclude = [] ## Nothing excluded by default
+# # cluster_instances = false ## false by default
+#
+# ## Datastores
+# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
+# # datastore_exclude = [] # Inventory paths to exclude
+# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
+# # datastore_metric_exclude = [] ## Nothing excluded by default
+# # datastore_instances = false ## false by default
+#
+# ## Datacenters
+# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+# # datacenter_exclude = [] # Inventory paths to exclude
+# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
+# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
+# # datacenter_instances = false ## false by default
+#
+# ## Plugin Settings
+# ## separator character to use for measurement and field names (default: "_")
+# # separator = "_"
+#
+# ## number of objects to retrieve per query for realtime resources (vms and hosts)
+# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+# # max_query_objects = 256
+#
+# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
+# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+# # max_query_metrics = 256
+#
+# ## number of go routines to use for collection and discovery of objects and metrics
+# # collect_concurrency = 1
+# # discover_concurrency = 1
+#
+# ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
+# # object_discovery_interval = "300s"
+#
+# ## timeout applies to any of the api request made to vcenter
+# # timeout = "60s"
+#
+# ## When set to true, all samples are sent as integers. This makes the output
+# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all
+# ## samples from vCenter, with the exception of percentages, are integer
+# ## values, but under some conditions, some averaging takes place internally in
+# ## the plugin. Setting this flag to "false" will send values as floats to
+# ## preserve the full precision when averaging takes place.
+# # use_int_samples = true
+#
+# ## Custom attributes from vCenter can be very useful for queries in order to
+# ## slice the metrics along different dimensions and for forming ad-hoc
+# ## relationships. They are disabled by default, since they can add a
+# ## considerable amount of tags to the resulting metrics. To enable, simply
+# ## set custom_attribute_exclude to [] (empty set) and use
+# ## custom_attribute_include to select the attributes you want to include.
+# ## The commented defaults below (custom_attribute_exclude = ["*"]) keep all
+# ## custom attributes excluded.
+# # custom_attribute_include = []
+# # custom_attribute_exclude = ["*"]
+#
+# ## Optional SSL Config
+# # ssl_ca = "/path/to/cafile"
+# # ssl_cert = "/path/to/certfile"
+# # ssl_key = "/path/to/keyfile"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # A Webhooks Event collector
+# [[inputs.webhooks]]
+# ## Address and port to host Webhook listener on
+# service_address = ":1619"
+#
+# [inputs.webhooks.filestack]
+# path = "/filestack"
+#
+# [inputs.webhooks.github]
+# path = "/github"
+# # secret = ""
+#
+# [inputs.webhooks.mandrill]
+# path = "/mandrill"
+#
+# [inputs.webhooks.rollbar]
+# path = "/rollbar"
+#
+# [inputs.webhooks.papertrail]
+# path = "/papertrail"
+#
+# [inputs.webhooks.particle]
+# path = "/particle"
+
+
+# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
+# [[inputs.zipkin]]
+# # path = "/api/v1/spans" # URL path for span data
+# # port = 9411 # Port on which Telegraf listens
+
diff --git a/packer/jambonz-mini/proxmox/files/vanilla_modules.conf.xml.patch b/packer/jambonz-mini/proxmox/files/vanilla_modules.conf.xml.patch
new file mode 100644
index 0000000..57aca31
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/vanilla_modules.conf.xml.patch
@@ -0,0 +1,105 @@
+--- modules.conf.xml 2019-09-30 19:01:33.304020805 +0000
++++ modules.conf.xml.new 2019-09-30 23:11:23.371830901 +0000
+@@ -1,5 +1,6 @@
+
+
++
+
+
+
+@@ -10,7 +11,7 @@
+
+
+
+-
++
+
+
+
+@@ -39,7 +40,7 @@
+
+
+
+-
++
+
+
+
+@@ -47,28 +48,28 @@
+
+
+
+-
++
+
+
+-
++
+
+
+
+-
++
+
+-
+-
+-
++
++
++
+
+-
++
+
+
+
+
+-
+-
++
++
+
+-
++
+
+
+
+@@ -87,7 +88,7 @@
+
+
+
+-
++
+
+
+
+@@ -96,17 +97,17 @@
+
+
+
+-
++
+
+
+
+
+
+-
++
+
+
+
+-
++
+
+
+
+@@ -123,7 +124,7 @@
+
+
+
+-
++
+
+
+
diff --git a/packer/jambonz-mini/proxmox/files/vimrc.local b/packer/jambonz-mini/proxmox/files/vimrc.local
new file mode 100644
index 0000000..f0f40c1
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/files/vimrc.local
@@ -0,0 +1,5 @@
+source /usr/share/vim/vim80/defaults.vim
+let skip_defaults_vim = 1
+if has('mouse')
+ set mouse=r
+endif
diff --git a/packer/jambonz-mini/proxmox/scripts/install_apiban.sh b/packer/jambonz-mini/proxmox/scripts/install_apiban.sh
new file mode 100644
index 0000000..e781a1d
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_apiban.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+cd /usr/local/src/
+git clone https://github.com/palner/apiban.git
+sudo mkdir /usr/local/bin/apiban && sudo chmod 0755 /usr/local/bin/apiban
+sudo cp -r /usr/local/src/apiban/clients/go/apiban-iptables-client /usr/local/bin/apiban && sudo chmod +x /usr/local/bin/apiban/apiban-iptables-client
+sudo cp /tmp/config.json /usr/local/bin/apiban/config.json
+sudo chmod 0644 /usr/local/bin/apiban/config.json
+sudo cp /tmp/apiban.logrotate /etc/logrotate.d/apiban-client
+sudo chmod 0644 /etc/logrotate.d/apiban-client
+echo "*/4 * * * * root cd /usr/local/bin/apiban && ./apiban-iptables-client >/dev/null 2>&1" | sudo tee -a /etc/crontab
diff --git a/packer/jambonz-mini/proxmox/scripts/install_app.sh b/packer/jambonz-mini/proxmox/scripts/install_app.sh
new file mode 100755
index 0000000..a006af9
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_app.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+VERSION=$1
+DB_USER=$2
+DB_PASS=$3
+
+cd /home/admin
+cp /tmp/ecosystem.config.js apps
+
+echo "building jambonz-feature-server.."
+cd /home/admin/apps/jambonz-feature-server && npm ci --unsafe-perm
+echo "building fsw-clear-old-calls.."
+cd /home/admin/apps/fsw-clear-old-calls && npm ci --unsafe-perm && sudo npm install -g .
+echo "building jambonz-api-server.."
+cd /home/admin/apps/jambonz-api-server && npm ci --unsafe-perm
+echo "building jambonz-webapp.."
+cd /home/admin/apps/jambonz-webapp && npm ci --unsafe-perm && npm run build
+echo "building sbc-sip-sidecar.."
+cd /home/admin/apps/sbc-sip-sidecar && npm ci --unsafe-perm
+echo "building sbc-inbound.."
+cd /home/admin/apps/sbc-inbound && npm ci --unsafe-perm
+echo "building sbc-outbound.."
+cd /home/admin/apps/sbc-outbound && npm ci --unsafe-perm
+echo "building sbc-call-router.."
+cd /home/admin/apps/sbc-call-router && npm ci --unsafe-perm
+echo "building jambonz-smpp-esme.."
+cd /home/admin/apps/jambonz-smpp-esme && npm ci --unsafe-perm
+echo "building sbc-rtpengine-sidecar.."
+cd /home/admin/apps/sbc-rtpengine-sidecar && npm ci --unsafe-perm
+
+sudo npm install -g pino-pretty pm2 pm2-logrotate gulp grunt
+sudo pm2 install pm2-logrotate
+
+echo "0 * * * * root fsw-clear-old-calls --password JambonzR0ck$ >> /var/log/fsw-clear-old-calls.log 2>&1" | sudo tee -a /etc/crontab
+echo "0 1 * * * root find /tmp -name \"*.mp3\" -mtime +2 -exec rm {} \; > /dev/null 2>&1" | sudo tee -a /etc/crontab
+
+sudo -u admin bash -c "pm2 install pm2-logrotate"
+sudo -u admin bash -c "pm2 set pm2-logrotate:max_size 1G"
+sudo -u admin bash -c "pm2 set pm2-logrotate:retain 5"
+sudo -u admin bash -c "pm2 set pm2-logrotate:compress true"
+
+sudo chown -R admin:admin /home/admin/apps
+
+sudo rm /home/admin/apps/jambonz-webapp/.env
+
+sudo snap install core
+sudo snap install --classic certbot
+sudo rm /usr/bin/certbot
+sudo ln -s /snap/bin/certbot /usr/bin/certbot
diff --git a/packer/jambonz-mini/proxmox/scripts/install_chrony.sh b/packer/jambonz-mini/proxmox/scripts/install_chrony.sh
new file mode 100644
index 0000000..caece01
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_chrony.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+sudo apt-get update
+sudo apt-get install -y chrony
+sudo systemctl enable chrony
diff --git a/packer/jambonz-mini/proxmox/scripts/install_drachtio.sh b/packer/jambonz-mini/proxmox/scripts/install_drachtio.sh
new file mode 100644
index 0000000..8d8429e
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_drachtio.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+VERSION=$1
+
+echo "drachtio version to install is ${VERSION}"
+
+chmod 0777 /usr/local/src
+cd /usr/local/src
+git clone https://github.com/drachtio/drachtio-server.git -b ${VERSION}
+cd drachtio-server
+git submodule update --init --recursive
+./autogen.sh && mkdir -p build && cd $_ && ../configure --enable-tcmalloc=yes CPPFLAGS='-DNDEBUG -g -O2' && make -j 4 && sudo make install
+
+echo "installing drachtio"
+sudo mv /tmp/drachtio.service /etc/systemd/system
+sudo mv /tmp/drachtio-5070.service /etc/systemd/system
+
+sudo mv /tmp/drachtio.conf.xml /etc
+sudo chmod 644 /etc/drachtio.conf.xml
+sudo chmod 644 /etc/systemd/system/drachtio.service
+sudo systemctl enable drachtio
+sudo systemctl restart drachtio
+sudo systemctl status drachtio.service
+
+sudo mv /tmp/drachtio-5070.conf.xml /etc
+sudo chmod 644 /etc/drachtio-5070.conf.xml
+sudo chmod 644 /etc/systemd/system/drachtio-5070.service
+sudo systemctl enable drachtio-5070
+sudo systemctl restart drachtio-5070
diff --git a/packer/jambonz-mini/proxmox/scripts/install_fail2ban.sh b/packer/jambonz-mini/proxmox/scripts/install_fail2ban.sh
new file mode 100644
index 0000000..cdf4034
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_fail2ban.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+sudo cp /etc/fail2ban/jail.conf /etc/fail2ban/jail.local
+
+# comment out: overhead too high and apiban suffices
+#sudo bash -c "cat >> /etc/fail2ban/jail.local" << EOF
+
+
+#[drachtio-tcp]
+#maxretry = 1
+#bantime = 86400
+#enabled = true
+#filter = drachtio
+#port = 5060
+#protocol = tcp
+#logpath = /var/log/drachtio/drachtio.log
+#
+#[drachtio-udp]
+#maxretry = 1
+#bantime = 86400
+#enabled = true
+#filter = drachtio
+#port = 5060
+#protocol = udp
+#logpath = /var/log/drachtio/drachtio.log
+#
+#EOF
+
+#sudo cp /tmp/drachtio-fail2ban.conf /etc/fail2ban/filter.d/drachtio.conf
+#sudo chmod 0644 /etc/fail2ban/filter.d/drachtio.conf
+
+# add nginx jails and filters
+sudo cp /tmp/nginx-noscript.jail /etc/fail2ban/jail.d/nginx-noscript.conf
+sudo cp /tmp/nginx-noproxy.jail /etc/fail2ban/jail.d/nginx-noproxy.conf
+sudo cp /tmp/nginx-badbots.jail /etc/fail2ban/jail.d/nginx-badbots.conf
+
+sudo cp /tmp/nginx-noscript.filter /etc/fail2ban/filter.d/nginx-noscript.conf
+sudo cp /tmp/nginx-noproxy.filter /etc/fail2ban/filter.d/nginx-noproxy.conf
+sudo cp /tmp/nginx-badbots.filter /etc/fail2ban/filter.d/nginx-badbots.conf
+
+sudo chmod 0644 /etc/fail2ban/jail.d/*.conf
+sudo chmod 0644 /etc/fail2ban/filter.d/*.conf
+
+sudo systemctl enable fail2ban
+sudo systemctl restart fail2ban
diff --git a/packer/jambonz-mini/proxmox/scripts/install_freeswitch.sh b/packer/jambonz-mini/proxmox/scripts/install_freeswitch.sh
new file mode 100644
index 0000000..f2dfe82
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_freeswitch.sh
@@ -0,0 +1,188 @@
+#!/bin/bash
+FREESWITCH_VERSION=v1.10.5
+GRPC_VERSION=c66d2cc
+#GRPC_VERSION=v1.39.1
+#GOOGLE_API_VERSION=v1p1beta1-speech
+GOOGLE_API_VERSION=e9da6f8b469c52b83f900e820be30762e9e05c57
+AWS_SDK_VERSION=1.8.129
+LWS_VERSION=v3.2.3
+MODULES_VERSION=v0.6.15
+
+echo "freeswitch version to install is ${FREESWITCH_VERSION}"
+echo "drachtio modules version to install is ${MODULES_VERSION}"
+echo "GRPC version to install is ${GRPC_VERSION}"
+echo "GOOGLE_API_VERSION version to install is ${GOOGLE_API_VERSION}"
+echo "AWS_SDK_VERSION version to install is ${AWS_SDK_VERSION}"
+echo "LWS_VERSION version to install is ${LWS_VERSION}"
+
+export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
+
+cd /tmp
+tar xvfz SpeechSDK-Linux-1.26.0.tar.gz
+cd SpeechSDK-Linux-1.26.0
+sudo cp -r include /usr/local/include/MicrosoftSpeechSDK
+sudo cp -r lib/ /usr/local/lib/MicrosoftSpeechSDK
+if [ "$ARCH" == "arm64" ]; then
+ echo installing Microsoft arm64 libs...
+ sudo cp /usr/local/lib/MicrosoftSpeechSDK/arm64/libMicrosoft.*.so /usr/local/lib/
+ echo done
+fi
+if [ "$ARCH" == "amd64" ]; then
+ echo installing Microsoft x64 libs...
+ sudo cp /usr/local/lib/MicrosoftSpeechSDK/x64/libMicrosoft.*.so /usr/local/lib/
+ echo done
+fi
+
+cd /usr/local/src
+echo remove SpeechSDK-Linux-1.24.2
+sudo rm -Rf /tmp/SpeechSDK-Linux-1.24.2.tgz /tmp/SpeechSDK-Linux-1.24.2
+echo done
+
+echo config git
+git config --global pull.rebase true
+echo done
+git clone https://github.com/signalwire/freeswitch.git -b ${FREESWITCH_VERSION}
+git clone https://github.com/warmcat/libwebsockets.git -b ${LWS_VERSION}
+git clone https://github.com/drachtio/drachtio-freeswitch-modules.git -b ${MODULES_VERSION}
+git clone https://github.com/grpc/grpc -b master
+cd grpc && git checkout ${GRPC_VERSION} && cd ..
+
+cd freeswitch/libs
+git clone https://github.com/drachtio/nuance-asr-grpc-api.git -b main
+git clone https://github.com/drachtio/riva-asr-grpc-api.git -b main
+git clone https://github.com/drachtio/soniox-asr-grpc-api.git -b main
+git clone https://github.com/freeswitch/spandsp.git -b master
+git clone https://github.com/freeswitch/sofia-sip.git -b master
+git clone https://github.com/dpirch/libfvad.git
+git clone https://github.com/aws/aws-sdk-cpp.git -b ${AWS_SDK_VERSION}
+git clone https://github.com/googleapis/googleapis -b master
+cd googleapis && git checkout ${GOOGLE_API_VERSION} && cd ..
+git clone https://github.com/awslabs/aws-c-common.git
+
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_audio_fork /usr/local/src/freeswitch/src/mod/applications/mod_audio_fork
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_aws_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_aws_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_azure_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_azure_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_aws_lex /usr/local/src/freeswitch/src/mod/applications/mod_aws_lex
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_deepgram_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_deepgram_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_google_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_google_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_ibm_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_ibm_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_nuance_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_nuance_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_nvidia_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_nvidia_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_soniox_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_soniox_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_jambonz_transcribe /usr/local/src/freeswitch/src/mod/applications/mod_jambonz_transcribe
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_google_tts /usr/local/src/freeswitch/src/mod/applications/mod_google_tts
+sudo cp -r /usr/local/src/drachtio-freeswitch-modules/modules/mod_dialogflow /usr/local/src/freeswitch/src/mod/applications/mod_dialogflow
+
+sudo sed -i -r -e 's/(.*AM_CFLAGS\))/\1 -g -O0/g' /usr/local/src/freeswitch/src/mod/applications/mod_audio_fork/Makefile.am
+sudo sed -i -r -e 's/(.*-std=c++11)/\1 -g -O0/g' /usr/local/src/freeswitch/src/mod/applications/mod_audio_fork/Makefile.am
+
+# copy Makefiles and patches into place
+cp /tmp/configure.ac.extra /usr/local/src/freeswitch/configure.ac
+cp /tmp/Makefile.am.extra /usr/local/src/freeswitch/Makefile.am
+cp /tmp/modules.conf.in.extra /usr/local/src/freeswitch/build/modules.conf.in
+cp /tmp/modules.conf.vanilla.xml.extra /usr/local/src/freeswitch/conf/vanilla/autoload_configs/modules.conf.xml
+cp /tmp/avmd.conf.xml /usr/local/src/freeswitch/conf/vanilla/autoload_configs/avmd_conf.xml
+cp /tmp/switch_rtp.c.patch /usr/local/src/freeswitch/src
+cp /tmp/switch_core_media.c.patch /usr/local/src/freeswitch/src
+cp /tmp/mod_avmd.c.patch /usr/local/src/freeswitch/src/mod/applications/mod_avmd
+cp /tmp/mod_httapi.c.patch /usr/local/src/freeswitch/src/mod/applications/mod_httapi
+
+# patch freeswitch
+cd /usr/local/src/freeswitch/src
+patch < switch_rtp.c.patch
+patch < switch_core_media.c.patch
+cd /usr/local/src/freeswitch/src/mod/applications/mod_avmd
+patch < mod_avmd.c.patch
+cd /usr/local/src/freeswitch/src/mod/applications/mod_httapi
+patch < mod_httapi.c.patch
+
+# build libwebsockets
+cd /usr/local/src/libwebsockets
+sudo mkdir -p build && cd build && sudo cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo && sudo make && sudo make install
+
+# build libfvad
+cd /usr/local/src/freeswitch/libs/libfvad
+sudo autoreconf -i && sudo ./configure && sudo make -j 4 && sudo make install
+
+# build spandsp
+cd /usr/local/src/freeswitch/libs/spandsp
+./bootstrap.sh && ./configure && make -j 4 && sudo make install
+
+# build sofia
+cd /usr/local/src/freeswitch/libs/sofia-sip
+./bootstrap.sh && ./configure && make -j 4 && sudo make install
+
+# build aws-c-common
+cd /usr/local/src/freeswitch/libs/aws-c-common
+mkdir -p build && cd build
+cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_SHARED_LIBS=OFF -DCMAKE_CXX_FLAGS="-Wno-unused-parameter"
+make -j 4 && sudo make install
+
+# build aws-sdk-cpp
+cd /usr/local/src/freeswitch/libs/aws-sdk-cpp
+mkdir -p build && cd build
+cmake .. -DBUILD_ONLY="lexv2-runtime;transcribestreaming" -DCMAKE_BUILD_TYPE=RelWithDebInfo -DBUILD_SHARED_LIBS=OFF -DCMAKE_CXX_FLAGS="-Wno-unused-parameter"
+make -j 4 && sudo make install
+
+# build grpc
+cd /usr/local/src/grpc
+git submodule update --init --recursive
+mkdir -p cmake/build
+cd cmake/build
+cmake -DBUILD_SHARED_LIBS=ON -DgRPC_SSL_PROVIDER=package -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=RelWithDebInfo ../..
+make -j 4
+sudo make install
+
+# build googleapis
+cd /usr/local/src/freeswitch/libs/googleapis
+echo "Ref: https://github.com/GoogleCloudPlatform/cpp-samples/issues/113"
+sed -i 's/\$fields/fields/' google/maps/routes/v1/route_service.proto
+sed -i 's/\$fields/fields/' google/maps/routes/v1alpha/route_service.proto
+LANGUAGE=cpp make -j 4
+
+# build nuance protobufs
+echo "building protobuf stubs for Nuance asr"
+cd /usr/local/src/freeswitch/libs/nuance-asr-grpc-api
+LANGUAGE=cpp make
+
+# build nvidia protobufs
+echo "building protobuf stubs for nvidia riva asr"
+cd /usr/local/src/freeswitch/libs/riva-asr-grpc-api
+LANGUAGE=cpp make
+
+# build soniox protobufs
+echo "building protobuf stubs for sonioxasr"
+cd /usr/local/src/freeswitch/libs/soniox-asr-grpc-api
+LANGUAGE=cpp make
+
+# build freeswitch
+echo "building freeswitch"
+cd /usr/local/src/freeswitch
+sudo ./bootstrap.sh -j
+sudo ./configure --enable-tcmalloc=yes --with-lws=yes --with-extra=yes
+sudo make -j 4
+sudo make install
+sudo make cd-sounds-install cd-moh-install
+sudo cp /tmp/acl.conf.xml /usr/local/freeswitch/conf/autoload_configs
+sudo cp /tmp/event_socket.conf.xml /usr/local/freeswitch/conf/autoload_configs
+sudo cp /tmp/switch.conf.xml /usr/local/freeswitch/conf/autoload_configs
+sudo cp /tmp/conference.conf.xml /usr/local/freeswitch/conf/autoload_configs
+sudo rm -Rf /usr/local/freeswitch/conf/dialplan/*
+sudo rm -Rf /usr/local/freeswitch/conf/sip_profiles/*
+sudo cp /tmp/mrf_dialplan.xml /usr/local/freeswitch/conf/dialplan
+sudo cp /tmp/mrf_sip_profile.xml /usr/local/freeswitch/conf/sip_profiles
+sudo cp /usr/local/src/freeswitch/conf/vanilla/autoload_configs/modules.conf.xml /usr/local/freeswitch/conf/autoload_configs
+sudo cp /tmp/freeswitch.service /etc/systemd/system
+sudo chown root:root -R /usr/local/freeswitch
+sudo chmod 644 /etc/systemd/system/freeswitch.service
+sudo sed -i -e 's/global_codec_prefs=OPUS,G722,PCMU,PCMA,H264,VP8/global_codec_prefs=PCMU,PCMA,OPUS,G722/g' /usr/local/freeswitch/conf/vars.xml
+sudo sed -i -e 's/outbound_codec_prefs=OPUS,G722,PCMU,PCMA,H264,VP8/outbound_codec_prefs=PCMU,PCMA,OPUS,G722/g' /usr/local/freeswitch/conf/vars.xml
+sudo systemctl enable freeswitch
+sudo cp /tmp/freeswitch_log_rotation /etc/cron.daily/freeswitch_log_rotation
+sudo chown root:root /etc/cron.daily/freeswitch_log_rotation
+sudo chmod a+x /etc/cron.daily/freeswitch_log_rotation
+
+echo "downloading soniox root verification certificate"
+cd /usr/local/freeswitch/certs
+wget https://raw.githubusercontent.com/grpc/grpc/master/etc/roots.pem
+
diff --git a/packer/jambonz-mini/proxmox/scripts/install_grafana.sh b/packer/jambonz-mini/proxmox/scripts/install_grafana.sh
new file mode 100644
index 0000000..0dde8f6
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_grafana.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+if [ "$1" = "yes" ]; then
+
+curl -sL https://packages.grafana.com/gpg.key | sudo apt-key add -
+echo "deb https://packages.grafana.com/oss/deb stable main" | sudo tee /etc/apt/sources.list.d/grafana.list
+sudo apt-get update
+sudo apt-get install -y grafana
+
+# move to port 3010
+sudo sed -i -e "s/;http_port = 3000/http_port = 3010/g" /etc/grafana/grafana.ini
+
+sudo mkdir /var/lib/grafana/dashboards
+sudo mv /tmp/grafana-dashboard-default.yaml /etc/grafana/provisioning/dashboards/default.yaml
+sudo mv /tmp/grafana-datasource.yml /etc/grafana/provisioning/datasources/datasource.yml
+
+sudo mv /tmp/grafana-dashboard-heplify.json /var/lib/grafana/dashboards
+sudo mv /tmp/grafana-dashboard-jambonz.json /var/lib/grafana/dashboards
+sudo mv /tmp/grafana-dashboard-servers.json /var/lib/grafana/dashboards
+
+sudo chown -R grafana:grafana /var/lib/grafana/dashboards
+sudo chown -R grafana:grafana /etc/grafana/provisioning/dashboards
+
+sudo systemctl enable grafana-server
+sudo systemctl start grafana-server
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/scripts/install_homer.sh b/packer/jambonz-mini/proxmox/scripts/install_homer.sh
new file mode 100644
index 0000000..14717c6
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_homer.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+if [ "$1" == "yes" ]; then
+
+DB_USER=$2
+DB_PASS=$3
+
+curl -s https://packagecloud.io/install/repositories/qxip/sipcapture/script.deb.sh | sudo bash
+sudo apt-get install -y homer-app heplify-server
+
+sudo cp /usr/local/homer/etc/webapp_config.json.example /usr/local/homer/etc/webapp_config.json
+sudo sed -i -e "s/homer_user/$DB_USER/g" /usr/local/homer/etc/webapp_config.json
+sudo sed -i -e "s/homer_password/$DB_PASS/g" /usr/local/homer/etc/webapp_config.json
+sudo sed -i -e "s/localhost/127.0.0.1/g" /usr/local/homer/etc/webapp_config.json
+sudo homer-app -create-table-db-config
+sudo homer-app -populate-table-db-config
+sudo sed -i -e "s/DBUser\s*=\s*\"postgres\"/DBUser = \"$DB_USER\"/g" /etc/heplify-server.toml
+sudo sed -i -e "s/DBPass\s*=\s*\"\"/DBPass = \"$DB_PASS\"/g" /etc/heplify-server.toml
+sudo sed -i -e "s/PromAddr\s*=\s*\".*\"/PromAddr = \"0.0.0.0:9098\"/g" /etc/heplify-server.toml
+sudo sed -i -e "s/HEPWSAddr\s*=\s*\".*\"/HEPWSAddr = \"0.0.0.0:3050\"/g" /etc/heplify-server.toml
+sudo sed -i -e "s/AlegIDs\s*=\s*\[\]/AlegIDs = \[\"X-CID\"]/g" /etc/heplify-server.toml
+sudo sed -i -e "s/CustomHeader\s*=\s*\[\]/CustomHeader = \[\"X-Application-Sid\", \"X-Originating-Carrier\", \"X-MS-Teams-Tenant-FQDN\", \"X-Authenticated-User\"]/g" /etc/heplify-server.toml
+
+sudo systemctl enable homer-app
+sudo systemctl restart homer-app
+sudo systemctl status homer-app
+
+sudo systemctl enable heplify-server
+sudo systemctl restart heplify-server
+sudo systemctl status heplify-server
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/scripts/install_influxdb.sh b/packer/jambonz-mini/proxmox/scripts/install_influxdb.sh
new file mode 100644
index 0000000..c82d28d
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_influxdb.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+if [ "$1" == "yes" ]; then
+
+sudo apt-get install -y apt-transport-https
+curl -sL https://repos.influxdata.com/influxdb.key | sudo apt-key add -
+echo "deb https://repos.influxdata.com/debian buster stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+sudo apt-get update
+sudo apt-get install -y influxdb
+sudo chmod a+x /usr/lib/influxdb/scripts/influxd-systemd-start.sh
+sudo systemctl enable influxdb
+sudo systemctl start influxdb
+
+sudo systemctl status influxdb.service
+sudo journalctl -xe
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/scripts/install_jaeger.sh b/packer/jambonz-mini/proxmox/scripts/install_jaeger.sh
new file mode 100644
index 0000000..5af8e1c
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_jaeger.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+if [ "$1" == "yes" ]; then
+
+cd /tmp
+
+echo "installing jaeger"
+wget https://github.com/jaegertracing/jaeger/releases/download/v1.33.0/jaeger-1.33.0-linux-amd64.tar.gz
+sudo tar xvfz jaeger-1.33.0-linux-amd64.tar.gz
+sudo cp jaeger-1.33.0-linux-amd64/jaeger-all-in-one /usr/local/bin
+
+sudo cp jaeger.service /etc/systemd/system
+sudo chmod 644 /etc/systemd/system/jaeger.service
+sudo systemctl enable jaeger
+sudo systemctl start jaeger
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/scripts/install_mysql.sh b/packer/jambonz-mini/proxmox/scripts/install_mysql.sh
new file mode 100644
index 0000000..253857e
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_mysql.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+DB_USER=$1
+DB_PASS=$2
+
+sudo apt install -y dirmngr
+sudo apt-key add - < /tmp/mysql-server.key
+echo "deb http://repo.mysql.com/apt/debian $(lsb_release -sc) mysql-8.0" | sudo tee /etc/apt/sources.list.d/mysql80.list
+sudo apt update
+sudo debconf-set-selections <<< "mysql-community-server mysql-community-server/root-pass password JambonzR0ck\$"
+sudo debconf-set-selections <<< "mysql-community-server mysql-community-server/re-root-pass password JambonzR0ck\$"
+sudo debconf-set-selections <<< "mysql-community-server mysql-server/default-auth-override select Use Legacy Authentication Method (Retain MySQL 5.x Compatibility)"
+sudo DEBIAN_FRONTEND=noninteractive apt install -y default-mysql-server
+#cd /etc/systemd/system
+#rm mysql.service
+#sudo systemctl enable mysql
+echo "starting mysql"
+sudo systemctl start mysql
+echo "creating database"
+
+# create the database and the user
+mysql -h localhost -u root -pJambonzR0ck\$ << END
+create database jambones;
+SET old_passwords=0;
+create user $DB_USER@'%' IDENTIFIED BY '$DB_PASS';
+grant all on jambones.* to $DB_USER@'%' with grant option;
+grant create user on *.* to $DB_USER@'%' with grant option;
+flush privileges;
+END
+
+# create the schema
+echo "creating schema"
+mysql -h localhost -u $DB_USER -p$DB_PASS -D jambones < /home/admin/apps/jambonz-api-server/db/jambones-sql.sql
+echo "seeding initial data"
+mysql -h localhost -u $DB_USER -p$DB_PASS -D jambones < /home/admin/apps/jambonz-api-server/db/seed-production-database-open-source.sql
+
diff --git a/packer/jambonz-mini/proxmox/scripts/install_nginx.sh b/packer/jambonz-mini/proxmox/scripts/install_nginx.sh
new file mode 100644
index 0000000..4368b3e
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_nginx.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+echo "installing nginx"
+
+sudo apt-get install -y nginx
+
+echo "installing apache utils for htpasswd"
+sudo apt-get install -y apache2-utils
+
+cd /etc/nginx/sites-available
+sudo mv /tmp/nginx.default default
+
+sudo systemctl enable nginx
+sudo systemctl restart nginx
+
+sudo systemctl status nginx
+sudo journalctl -xe
diff --git a/packer/jambonz-mini/proxmox/scripts/install_node_red.sh b/packer/jambonz-mini/proxmox/scripts/install_node_red.sh
new file mode 100644
index 0000000..2433ef1
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_node_red.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+if [ "$1" == "yes" ]; then
+
+#install node-red
+mkdir -p apps && cd $_
+git clone https://github.com/node-red/node-red.git
+cd node-red
+sudo npm install --unsafe-perm
+grunt build
+
+sudo mv /tmp/ecosystem.config.js /home/admin/apps
+sudo chown -R admin:admin /home/admin/apps
+
+sudo -u admin bash -c "pm2 start /home/admin/apps/ecosystem.config.js"
+sudo env PATH=$PATH:/usr/bin /usr/lib/node_modules/pm2/bin/pm2 startup systemd -u admin --hp /home/admin
+sudo -u admin bash -c "pm2 save"
+sudo systemctl enable pm2-admin.service
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/scripts/install_nodejs.sh b/packer/jambonz-mini/proxmox/scripts/install_nodejs.sh
new file mode 100644
index 0000000..d8de438
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_nodejs.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+curl -sL https://deb.nodesource.com/setup_18.x | sudo bash - && sudo apt-get install -y nodejs
+sudo npm install -g npm@latest
+node -v
+npm -v
+sudo ls -lrt /root/.npm/
+sudo ls -lrt /root/.npm/_logs
+sudo ls -lrt /root/.npm/_cacache
+sudo chmod -R a+wx /root
+sudo chown -R 1000:1000 /root/.npm
+ls -lrt /root/.npm/
+ls -lrt /root/.npm/_logs
+ls -lrt /root/.npm/_cacache
diff --git a/packer/jambonz-mini/proxmox/scripts/install_os_tuning.sh b/packer/jambonz-mini/proxmox/scripts/install_os_tuning.sh
new file mode 100755
index 0000000..3a4585d
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_os_tuning.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+sudo sed -i '/# End of file/i * hard nofile 65535' /etc/security/limits.conf
+sudo sed -i '/# End of file/i * soft nofile 65535' /etc/security/limits.conf
+sudo sed -i '/# End of file/i root hard nofile 65535' /etc/security/limits.conf
+sudo sed -i '/# End of file/i root soft nofile 65535' /etc/security/limits.conf
+sudo sed -i s/#DefaultLimitNOFILE=/DefaultLimitNOFILE=65535/g /etc/systemd/system.conf
+sudo sed -i s/#DefaultLimitNOFILE=/DefaultLimitNOFILE=65535/g /etc/systemd/user.conf
+
+sudo bash -c 'cat >> /etc/sysctl.conf << EOT
+net.ipv6.conf.all.disable_ipv6 = 1
+net.ipv6.conf.default.disable_ipv6 = 1
+net.ipv6.conf.lo.disable_ipv6 = 1
+net.core.rmem_max=26214400
+net.core.rmem_default=26214400
+vm.swappiness=0
+vm.dirty_expire_centisecs=200
+vm.dirty_writeback_centisecs=100
+EOT'
+
+sudo cp /tmp/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/scripts/install_postgresql.sh b/packer/jambonz-mini/proxmox/scripts/install_postgresql.sh
new file mode 100644
index 0000000..8b96d3f
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_postgresql.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+if [ "$1" == "yes" ]; then
+
+DB_USER=$2
+DB_PASS=$3
+
+wget -q https://www.postgresql.org/media/keys/ACCC4CF8.asc -O- | sudo apt-key add -
+sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/postgresql.list'
+sudo apt-get update
+sudo apt-get install -y postgresql-12
+sudo systemctl daemon-reload
+sudo systemctl enable postgresql
+sudo systemctl restart postgresql
+
+sudo -u postgres psql -c "CREATE DATABASE homer_config;"
+sudo -u postgres psql -c "CREATE DATABASE homer_data;"
+sudo -u postgres psql -c "CREATE ROLE ${DB_USER} WITH SUPERUSER LOGIN PASSWORD '$DB_PASS';"
+sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE homer_config to ${DB_USER};"
+sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE homer_data to ${DB_USER};"
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/scripts/install_redis.sh b/packer/jambonz-mini/proxmox/scripts/install_redis.sh
new file mode 100644
index 0000000..7a36e91
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_redis.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+sudo apt-get install -y redis-server
+sudo systemctl enable redis-server
+sudo systemctl restart redis-server
diff --git a/packer/jambonz-mini/proxmox/scripts/install_rtpengine.sh b/packer/jambonz-mini/proxmox/scripts/install_rtpengine.sh
new file mode 100644
index 0000000..164eac1
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_rtpengine.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+VERSION=$1
+
+echo "rtpengine version to install is ${VERSION}, cloud provider is $2"
+
+cd /usr/local/src
+git clone https://github.com/BelledonneCommunications/bcg729.git
+cd bcg729
+cmake . -DCMAKE_INSTALL_PREFIX=/usr && make && sudo make install chdir=/usr/local/src/bcg729
+cd /usr/local/src
+
+git clone https://github.com/warmcat/libwebsockets.git -b v3.2.3
+cd /usr/local/src/libwebsockets
+sudo mkdir -p build && cd build && sudo cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo && sudo make && sudo make install
+
+cd /usr/local/src
+git clone https://github.com/sipwise/rtpengine.git -b ${VERSION}
+cd rtpengine
+make -j 4 with_transcoding=yes with_iptables_option=yes with-kernel
+
+# copy iptables extension into place
+cp ./iptables-extension/libxt_RTPENGINE.so `pkg-config xtables --variable=xtlibdir`
+
+# install kernel module
+mkdir /lib/modules/`uname -r`/updates/
+cp ./kernel-module/xt_RTPENGINE.ko /lib/modules/`uname -r`/updates
+depmod -a
+modprobe xt_RTPENGINE
+cat << EOF >> /etc/modules
+xt_RTPENGINE
+EOF
+
+echo 'add 42' > /proc/rtpengine/control
+iptables -I INPUT -p udp --dport 40000:60000 -j RTPENGINE --id 42
+
+if [ "$2" = "gcp" ]; then
+ echo "installing rtpengine for gcp"
+ sudo mv /tmp/rtpengine.gcp.service /etc/systemd/system/rtpengine.service
+else
+ echo "installing rtpengine"
+ sudo mv /tmp/rtpengine.service /etc/systemd/system/rtpengine.service
+fi
+
+cp /usr/local/src/rtpengine/daemon/rtpengine /usr/local/bin
+cp /usr/local/src/rtpengine/recording-daemon/rtpengine-recording /usr/local/bin/
+sudo mv /tmp/rtpengine-recording.service /etc/systemd/system
+sudo mv /tmp/rtpengine-recording.ini /etc/rtpengine-recording.ini
+sudo chmod 644 /etc/systemd/system/rtpengine.service
+sudo chmod 644 /etc/systemd/system/rtpengine-recording.service
+sudo chmod 644 /etc/rtpengine-recording.ini
+mkdir -p /var/spool/recording
+mkdir -p /recording
+sudo systemctl enable rtpengine
+sudo systemctl enable rtpengine-recording
+sudo systemctl start rtpengine
+sudo systemctl start rtpengine-recording
diff --git a/packer/jambonz-mini/proxmox/scripts/install_telegraf.sh b/packer/jambonz-mini/proxmox/scripts/install_telegraf.sh
new file mode 100644
index 0000000..48e60ea
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/scripts/install_telegraf.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+if [ "$1" == "yes" ]; then
+
+INFLUXDB_IP=$2
+
+cd /tmp
+wget -q https://repos.influxdata.com/influxdata-archive_compat.key
+gpg --with-fingerprint --show-keys ./influxdata-archive_compat.key
+cat influxdata-archive_compat.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null
+echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
+
+#curl -sL https://repos.influxdata.com/influxdb.key | sudo apt-key add -
+#curl -sLhttps://repos.influxdata.com/influxdata-archive_compat.key | sudo apt-key add -
+#echo "deb https://repos.influxdata.com/debian stretch stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+
+sudo apt-get update
+sudo apt-get install -y telegraf
+
+sudo cp /tmp/telegraf.conf /etc/telegraf/telegraf.conf
+sudo sed -i -e "s/influxdb:8086/$INFLUXDB_IP:8086/g" /etc/telegraf/telegraf.conf
+
+sudo systemctl enable telegraf
+sudo systemctl start telegraf
+
+fi
\ No newline at end of file
diff --git a/packer/jambonz-mini/proxmox/template.json b/packer/jambonz-mini/proxmox/template.json
new file mode 100644
index 0000000..f31242b
--- /dev/null
+++ b/packer/jambonz-mini/proxmox/template.json
@@ -0,0 +1,180 @@
+{
+ "variables": {
+ "proxmox_url": "{{user `proxmox_url`}}",
+ "proxmox_user": "{{user `proxmox_user`}}",
+ "proxmox_password": "{{user `proxmox_password`}}",
+ "proxmox_node": "{{user `proxmox_node`}}",
+ "proxmox_clone_vm": "{{user `proxmox_clone_vm`}}",
+ "proxmox_vm_id": "{{user `proxmox_vm_id`}}",
+ "proxmox_source_vm_ssh_username": "admin",
+ "proxmox_source_vm_private_key_file": "~/.ssh/id_rsa",
+ "proxmox_memory": "8192",
+ "proxmox_cores": "4",
+ "proxmox_bridge": "{{user `proxmox_bridge`}}",
+ "proxmox_ip": "{{user `proxmox_ip`}}",
+ "proxmox_gateway": "{{user `proxmox_gateway`}}",
+ "drachtio_version": "v0.8.22",
+ "jambonz_version": "v0.8.3-2",
+ "jambonz_user": "admin",
+ "jambonz_password": "JambonzR0ck$",
+ "install_telegraf": "yes",
+ "homer_user": "homer_user",
+ "homer_password": "XcapJTqy11LnsYRtxXGPTYQkAnI",
+ "install_influxdb": "yes",
+ "install_homer": "yes",
+ "install_jaeger": "yes",
+ "install_nodered": "no",
+ "influxdb_ip": "127.0.0.1",
+ "rtp_engine_version": "mr11.2.1.5",
+ "rtp_engine_min_port": "40000",
+ "rtp_engine_max_port": "60000",
+ "mediaserver_name": "jambonz",
+ "preferred_codec_list": "PCMU,PCMA,OPUS,G722"
+ },
+ "builders": [{
+ "type": "proxmox-clone",
+ "proxmox_url": "{{user `proxmox_url`}}",
+ "username": "{{user `proxmox_user`}}",
+ "password": "{{user `proxmox_password`}}",
+ "node": "{{user `proxmox_node`}}",
+ "clone_vm": "{{user `proxmox_clone_vm`}}",
+ "vm_id": "{{user `proxmox_vm_id`}}",
+ "memory": "{{user `proxmox_memory`}}",
+ "cores": "{{user `proxmox_cores`}}",
+ "insecure_skip_tls_verify": true,
+ "ssh_username": "{{user `proxmox_source_vm_ssh_username`}}",
+ "ssh_private_key_file": "{{user `proxmox_source_vm_private_key_file`}}",
+ "template_name": "jambonz-mini-{{user `jambonz_version`}}-template",
+ "nameserver": "8.8.8.8",
+ "cloud_init": true,
+ "network_adapters": [
+ {
+ "bridge": "{{user `proxmox_bridge`}}"
+ }
+ ],
+ "ipconfig": [
+ {
+ "ip": "{{user `proxmox_ip`}}",
+ "gateway": "{{user `proxmox_gateway`}}"
+ }
+ ]
+ }],
+ "provisioners": [
+ {
+ "type": "shell",
+ "inline": [
+ "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done",
+ "sudo apt-get update",
+ "sudo apt-get remove --auto-remove nftables",
+ "sudo apt-get purge nftables",
+ "sudo apt-get -y install python lsof gcc g++ make cmake build-essential git autoconf automake default-mysql-client redis-tools \\",
+ "curl argon2 telnet libtool libtool-bin libssl-dev libcurl4-openssl-dev libz-dev systemd-coredump liblz4-tool \\",
+ "libxtables-dev libip6tc-dev libip4tc-dev libiptc-dev linux-headers-$(uname -r) libavformat-dev liblua5.1-0-dev libavfilter-dev libavcodec-dev libswresample-dev \\",
+ "libevent-dev libpcap-dev libxmlrpc-core-c3-dev markdown libjson-glib-dev lsb-release \\",
+ "libhiredis-dev gperf libspandsp-dev default-libmysqlclient-dev htop dnsutils gdb \\",
+ "gnupg2 wget pkg-config ca-certificates libjpeg-dev libsqlite3-dev libpcre3-dev libldns-dev snapd \\",
+ "libspeex-dev libspeexdsp-dev libedit-dev libtiff-dev yasm libswscale-dev haveged jq fail2ban \\",
+ "libopus-dev libsndfile-dev libshout3-dev libmpg123-dev libmp3lame-dev libopusfile-dev libgoogle-perftools-dev",
+ "sudo chmod a+w /usr/local/src",
+ "mkdir ~/apps",
+ "cd ~/apps",
+ "git config --global advice.detachedHead false",
+ "git clone https://github.com/jambonz/sbc-call-router.git -b {{user `jambonz_version`}}",
+ "git clone https://github.com/jambonz/fsw-clear-old-calls.git -b {{user `jambonz_version`}}",
+ "git clone https://github.com/jambonz/sbc-outbound.git -b {{user `jambonz_version`}}",
+ "git clone https://github.com/jambonz/sbc-inbound.git -b {{user `jambonz_version`}}",
+ "git clone https://github.com/jambonz/sbc-sip-sidecar.git -b {{user `jambonz_version`}}",
+ "git clone https://github.com/jambonz/jambonz-feature-server.git -b {{user `jambonz_version`}}",
+ "git clone https://github.com/jambonz/jambonz-api-server.git -b {{user `jambonz_version`}}",
+ "git clone https://github.com/jambonz/jambonz-webapp.git -b {{user `jambonz_version`}}",
+ "git clone https://github.com/jambonz/jambonz-smpp-esme.git -b {{user `jambonz_version`}}",
+ "git clone https://github.com/jambonz/sbc-rtpengine-sidecar.git -b {{user `jambonz_version`}}"
+ ]
+ },
+ {
+ "type": "file",
+ "source": "files/",
+ "destination": "/tmp"
+ },
+ {
+ "type": "shell",
+ "execute_command": "chmod +x {{ .Path }}; sudo '{{ .Path }}' {{user `rtp_engine_version`}}",
+ "script": "scripts/install_rtpengine.sh"
+ },
+ {
+ "type": "shell",
+ "execute_command": "chmod +x {{ .Path }}; sudo '{{ .Path }}' {{user `drachtio_version`}}",
+ "script": "scripts/install_drachtio.sh"
+ },
+ {
+ "type": "shell",
+ "script": "scripts/install_nodejs.sh"
+ },
+ {
+ "type": "shell",
+ "environment_vars": [
+ "ARCH=amd64",
+ "MEDIA_SERVER_NAME={{user `mediaserver_name`}}",
+ "PREFERRED_CODEC_LIST={{user `preferred_codec_list`}}"
+ ],
+ "script": "scripts/install_freeswitch.sh"
+ },
+ {
+ "type": "shell",
+ "execute_command": "chmod +x {{ .Path }}; sudo '{{ .Path }}' {{user `install_influxdb`}}",
+ "script": "scripts/install_influxdb.sh"
+ },
+ {
+ "type": "shell",
+ "script": "scripts/install_nginx.sh"
+ },
+ {
+ "type": "shell",
+ "script": "scripts/install_redis.sh"
+ },
+ {
+ "type": "shell",
+ "execute_command": "chmod +x {{ .Path }}; sudo '{{ .Path }}' {{user `install_homer`}} {{user `homer_user`}} {{user `homer_password`}}",
+ "script": "scripts/install_postgresql.sh"
+ },
+ {
+ "type": "shell",
+ "execute_command": "chmod +x {{ .Path }}; sudo '{{ .Path }}' {{user `install_homer`}} {{user `influxdb_ip`}}",
+ "script": "scripts/install_telegraf.sh"
+ },
+ {
+ "type": "shell",
+ "execute_command": "chmod +x {{ .Path }}; sudo '{{ .Path }}' {{user `install_influxdb`}}",
+ "script": "scripts/install_grafana.sh"
+ },
+ {
+ "type": "shell",
+ "execute_command": "chmod +x {{ .Path }}; sudo '{{ .Path }}' {{user `install_jaeger`}}",
+ "script": "scripts/install_jaeger.sh"
+ },
+ {
+ "type": "shell",
+ "execute_command": "chmod +x {{ .Path }}; sudo '{{ .Path }}' {{user `jambonz_version`}} {{user `jambonz_user`}} {{user `jambonz_password`}}",
+ "script": "scripts/install_app.sh"
+ },
+ {
+ "type": "shell",
+ "execute_command": "chmod +x {{ .Path }}; sudo '{{ .Path }}' {{user `jambonz_user`}} {{user `jambonz_password`}}",
+ "script": "scripts/install_mysql.sh"
+ },
+ {
+ "type": "shell",
+ "execute_command": "chmod +x {{ .Path }}; sudo '{{ .Path }}' {{user `install_homer`}} {{user `homer_user`}} {{user `homer_password`}}",
+ "script": "scripts/install_homer.sh"
+ },
+ {
+ "type": "shell",
+ "inline": [
+ "echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections",
+ "echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections",
+ "sudo apt-get -y install iptables-persistent",
+ "sudo rm -Rf /tmp/*"
+ ]
+ }
+ ]
+}
diff --git a/terraform/proxmox/.gitignore b/terraform/proxmox/.gitignore
new file mode 100644
index 0000000..a254365
--- /dev/null
+++ b/terraform/proxmox/.gitignore
@@ -0,0 +1,2 @@
+terraform.tfvars
+user_data_*.cfg
\ No newline at end of file
diff --git a/terraform/proxmox/README.md b/terraform/proxmox/README.md
new file mode 100644
index 0000000..67c0572
--- /dev/null
+++ b/terraform/proxmox/README.md
@@ -0,0 +1,42 @@
+# terraform deployment of jambonz-mini VM for Proxmox
+
+A [terraform](https://www.terraform.io/) template to deploy a jambonz-mini server on Proxmox. The VM template should have been built using the associated packer script.
+
+## Prerequisites
+- A Proxmox jambonz-mini VM template built using the associated packer template
+- A Proxmox node with two bridges: one for a private network and one for a public network
+
+The jambonz-mini VM will attach to both networks as a dual-homed server; thus it will have both a public address (as needed to be reachable for SIP, RTP, and HTTP) as well as a private address.
+
+## Installing
+
+```
+terraform init
+```
+to install the Proxmox terraform provider, then you will typically create a `terraform.tfvars` to provide variable values (they can also be provided on the command line). Once that is done, then:
+
+```
+terraform plan
+```
+
+If all looks good then
+
+```
+terraform apply
+```
+
+## Variables
+
+- `pm_api_url`: URL of the Proxmox GUI api (e.g. https://<proxmox-host>:8006/api2/json)
+- `pm_user`: Proxmox GUI user
+- `pm_password`: Proxmox GUI password
+- `pm_source_template`: name of VM template (this would have been built using the packer template)
+- `pm_target_node`: Proxmox node name
+- `pm_storage`: storage name (e.g. "local")
+- `pve_host`: IP address of Proxmox node
+- `pve_user`: ssh user for Proxmox node (e.g. "root")
+- `url_portal`: DNS name you will assign to the jambonz-mini VM (the jambonz portal will be served at this URL)
+- `ifpconfig_private`: ip and gateway for private network (e.g. "ip=10.200.100.20/24,gw=10.200.100.1")
+- `ifpconfig_public`: ip and gateway for public network (e.g. "ip=62.210.101.46/32,gw=62.210.0.1")
+- `ssh_pub_key_path`: path to your public ssh key (e.g. "~/.ssh/id_rsa.pub")
+- `ssh_private_key_path`: path to your private ssh key (e.g. "~/.ssh/id_rsa")
\ No newline at end of file
diff --git a/terraform/proxmox/files/cloud-init.cloud_config.tftpl b/terraform/proxmox/files/cloud-init.cloud_config.tftpl
new file mode 100644
index 0000000..e19bce7
--- /dev/null
+++ b/terraform/proxmox/files/cloud-init.cloud_config.tftpl
@@ -0,0 +1,109 @@
+#cloud-config
+hostname: ${hostname}
+ssh_authorized_keys:
+ - ${ssh_key}
+write_files:
+ - content: |
+ #!/bin/bash -xe
+
+ # get instance metadata (no AWS metadata service on Proxmox: derive values locally;
+ # the hostname stands in for the AWS instance id as the initial admin password)
+ PRIVATE_IPV4="$(/bin/ip -4 addr show eth0 | grep -oP '(?<=inet )\d+(\.\d+){3}' | head -n 1)"
+ PUBLIC_IPV4="$(/usr/bin/curl -s http://ipecho.net/plain)"
+ INSTANCE_ID="$(hostname)"
+ # change the database password to a random id
+ NEW_DB_PASSWD="$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)"
+ echo "alter user 'admin'@'%' identified by '$NEW_DB_PASSWD'" | mysql -h localhost -u admin -D jambones -pJambonzR0ck$
+ sudo sed -i -e "s/\(.*\)JAMBONES_MYSQL_PASSWORD.*/\1JAMBONES_MYSQL_PASSWORD: '$NEW_DB_PASSWD',/g" /home/admin/apps/ecosystem.config.js
+
+ # replace ip addresses in the ecosystem.config.js file (NOTE(review): $AWS_REGION_NAME is unset on Proxmox, so that placeholder is replaced with an empty string -- confirm the Proxmox image's ecosystem.config.js has no AWS_REGION_NAME placeholder)
+ sudo sed -i -e "s/\(.*\)PRIVATE_IP\(.*\)/\1$PRIVATE_IPV4\2/g" /home/admin/apps/ecosystem.config.js
+ sudo sed -i -e "s/\(.*\)AWS_REGION_NAME\(.*\)/\1$AWS_REGION_NAME\2/g" /home/admin/apps/ecosystem.config.js
+ sudo sed -i -e "s/\(.*\)--JAMBONES_API_BASE_URL--\(.*\)/\1http:\/\/$PUBLIC_IPV4\/v1\2/g" /home/admin/apps/ecosystem.config.js
+
+ # replace JWT_SECRET with a freshly generated uuid
+ uuid=$(uuidgen)
+ sudo sed -i -e "s/\(.*\)JWT-SECRET-GOES_HERE\(.*\)/\1$uuid\2/g" /home/admin/apps/ecosystem.config.js
+
+ # reset the database; the initial portal admin password is $INSTANCE_ID (the hostname, set above)
+ JAMBONES_ADMIN_INITIAL_PASSWORD=$INSTANCE_ID JAMBONES_MYSQL_USER=admin JAMBONES_MYSQL_PASSWORD=$NEW_DB_PASSWD JAMBONES_MYSQL_DATABASE=jambones JAMBONES_MYSQL_HOST=localhost /home/admin/apps/jambonz-api-server/db/reset_admin_password.js
+
+ if [[ -z "${url_portal}" ]]; then
+ # portals will be accessed by IP address of server
+ echo "VITE_API_BASE_URL=http://$PUBLIC_IPV4/api/v1" > /home/admin/apps/jambonz-webapp/.env
+ API_BASE_URL=http://$PUBLIC_IPV4/api/v1 TAG=""
+ sed -i -e "\@@i\ $TAG" /home/admin/apps/jambonz-webapp/dist/index.html
+ else
+ # portals will be accessed by DNS name
+ echo "VITE_API_BASE_URL=http://${url_portal}/api/v1" > /home/admin/apps/jambonz-webapp/.env
+ API_BASE_URL=http://${url_portal}/api/v1 TAG=""
+ sed -i -e "\@@i\ $TAG" /home/admin/apps/jambonz-webapp/dist/index.html
+ # add row to system information table
+ mysql -h localhost -u admin -D jambones -p$NEW_DB_PASSWD -e $'insert into system_information (domain_name, sip_domain_name, monitoring_domain_name) values ('\'''"${url_portal}"''\'', '\''sip.'"${url_portal}"''\'', '\''grafana.'"${url_portal}"''\'')'
+
+ sudo cat << EOF > /etc/nginx/sites-available/default
+ server {
+ listen 80;
+ server_name ${url_portal};
+ location /api/ {
+ rewrite ^/api/(.*)$ /\$1 break;
+ proxy_pass http://localhost:3002;
+ proxy_set_header Host \$host;
+ }
+ location / {
+ proxy_pass http://localhost:3001;
+ proxy_set_header Host \$host;
+ }
+ }
+ server {
+ listen 80;
+ server_name api.${url_portal};
+ location / {
+ proxy_pass http://localhost:3002;
+ proxy_set_header Host \$host;
+ }
+ }
+ server {
+ listen 80;
+ server_name grafana.${url_portal};
+ location / {
+ proxy_pass http://localhost:3010;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade \$http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host \$host;
+ proxy_cache_bypass \$http_upgrade;
+ }
+ }
+ server {
+ listen 80;
+ server_name homer.${url_portal};
+ location / {
+ proxy_pass http://localhost:9080;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade \$http_upgrade;
+ proxy_set_header Connection 'upgrade';
+ proxy_set_header Host \$host;
+ proxy_cache_bypass \$http_upgrade;
+ }
+ }
+ EOF
+
+ sudo systemctl restart nginx
+ fi
+
+ # restart heplify-server
+ sudo systemctl restart heplify-server
+
+ sudo -u admin bash -c "pm2 restart /home/admin/apps/ecosystem.config.js"
+ sudo -u admin bash -c "pm2 save"
+ sudo env PATH=$PATH:/usr/bin /usr/lib/node_modules/pm2/bin/pm2 startup systemd -u admin --hp /home/admin
+
+ # get an apiban key
+ APIBANKEY=$(curl -X POST -u jambonz:1a074994242182a9e0b67eae93978826 -d "{\"client\": \"$NEW_DB_PASSWD\"}" -s https://apiban.org/sponsor/newkey | jq -r '.ApiKey')
+ sudo sed -i -e "s/API-KEY-HERE/$APIBANKEY/g" /usr/local/bin/apiban/config.json
+ sudo /usr/local/bin/apiban/apiban-iptables-client FULL
+
+ path: /var/tmp/script.sh
+ permissions: '0755'
+runcmd:
+ - /var/tmp/script.sh
diff --git a/terraform/proxmox/main.tf b/terraform/proxmox/main.tf
new file mode 100644
index 0000000..79b0774
--- /dev/null
+++ b/terraform/proxmox/main.tf
@@ -0,0 +1,168 @@
+terraform {
+ required_providers {
+ proxmox = {
+ source = "telmate/proxmox"
+ }
+ }
+}
+
+variable "ssh_pub_key_path" {
+ description = "path to ssh public key"
+ type = string
+}
+variable "ssh_private_key_path" {
+ description = "path to ssh private key"
+ type = string
+}
+variable "pm_api_url" {
+ description = "Proxmox API URL"
+ type = string
+}
+
+variable "url_portal" {
+ description = "DNS name to assign to instance"
+ type = string
+}
+
+variable "ifpconfig_private" {
+ description = "ipconfig for private bridge/interface, e.g. ip=10.200.100.20/24,gw=10.200.100.1"
+ type = string
+}
+
+variable "ifpconfig_public" {
+ description = "ipconfig for public bridge/interface"
+ type = string
+}
+
+variable "pm_user" {
+ description = "Proxmox API user"
+ type = string
+}
+
+variable "pm_password" {
+ description = "Proxmox API password"
+ type = string
+ sensitive = true
+}
+
+variable "pm_tls_insecure" {
+ description = "Skip TLS verification"
+ type = bool
+ default = true
+}
+
+variable "pm_source_template" {
+ description = "jambonz base template to clone"
+ type = string
+}
+
+variable "pm_target_node" {
+ description = "proxmox target node"
+ type = string
+}
+
+variable "pm_storage" {
+ description = "proxmox storage string"
+ type = string
+}
+
+variable "pve_user" {
+ description = "ssh user for proxmox node"
+ type = string
+}
+
+variable "pve_host" {
+ description = "proxmox host IP or name"
+ type = string
+}
+
+variable "nameserver" {
+ description = "nameserver IP address"
+ type = string
+ default = "8.8.8.8"
+}
+
+variable "vm_count" {
+ description = "number of instances to deploy"
+ type = number
+ default = 1
+}
+
+provider "proxmox" {
+ pm_api_url = var.pm_api_url
+ pm_user = var.pm_user
+ pm_password = var.pm_password
+ pm_tls_insecure = var.pm_tls_insecure
+}
+
+resource "local_file" "cloud_init_user_data_file" {
+ count = var.vm_count
+ content = templatefile("${path.module}/files/cloud-init.cloud_config.tftpl", {
+ ssh_key = file(var.ssh_pub_key_path)
+ hostname = var.url_portal
+ url_portal = var.url_portal
+ })
+ filename = "${path.module}/files/user_data_${count.index}.cfg"
+}
+
+resource "null_resource" "cloud_init_config_files" {
+ count = var.vm_count
+ connection {
+ type = "ssh"
+ user = var.pve_user
+ private_key = file(var.ssh_private_key_path)
+ host = var.pve_host
+ }
+
+ provisioner "file" {
+ source = local_file.cloud_init_user_data_file[count.index].filename
+ destination = "/var/lib/vz/snippets/user_data_vm-jambonz-mini-${count.index}.yml"
+ }
+}
+
+resource "proxmox_vm_qemu" "jambonz-mini-v083-1" {
+ depends_on = [
+ null_resource.cloud_init_config_files,
+ ]
+
+ count = var.vm_count
+ # include count.index so each VM gets a unique name when vm_count > 1
+ # (a constant name made terraform apply fail on duplicate VM names);
+ # index + 1 keeps the name "jambonz-mini-v083-1" for the default vm_count = 1
+ name = "jambonz-mini-v083-${count.index + 1}"
+ target_node = var.pm_target_node
+ clone = var.pm_source_template
+ full_clone = true
+
+ cores = 4
+ sockets = 1
+ cpu = "host"
+ memory = 8192
+ os_type = "cloud-init"
+
+ network {
+ model = "virtio"
+ bridge = "vmbr30"
+ }
+
+ network {
+ model = "virtio"
+ bridge = "vmbr0"
+ }
+
+ disk {
+ type = "scsi"
+ size = "600G"
+ format = "raw"
+ storage = var.pm_storage
+ }
+
+ ssh_user = "admin"
+ ssh_private_key = file(var.ssh_private_key_path)
+
+ # Cloud Init settings
+ ipconfig0 = var.ifpconfig_private
+ ipconfig1 = var.ifpconfig_public
+ sshkeys = file(var.ssh_pub_key_path)
+ nameserver = var.nameserver
+ cicustom = "user=local:snippets/user_data_vm-jambonz-mini-${count.index}.yml"
+
+}