Commit c18e3947 authored by henrik.prangel
Browse files

Merge remote-tracking branch 'origin/master' into master

parents f85257b9 faad49c4
......@@ -7,9 +7,10 @@ pipeline {
agent any
parameters {
booleanParam(name: 'Recreate', defaultValue: false, description: 'Delete and create VM')
string(name: 'ElasticSearchVersion', defaultValue: '7.8.0', description: 'ElasticSearch version', trim: true)
string(name: 'ElasticSearchVersion', defaultValue: '7.9.0', description: 'ElasticSearch version', trim: true)
}
environment {
RSYNC = 'rsync -e "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --exclude ".git"'
SSH = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
SCP = 'scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
TLD = 'riaint.ee'
......@@ -108,15 +109,22 @@ pipeline {
}
}
stage('Start Elasticsearch') {
stage('Update conf and start Elasticsearch') {
when {
expression { params.Recreate == true }
}
steps {
script {
ssh_start_es = '''
sudo systemctl start elasticsearch
'''
set -x
set -e
cd /tmp/
sudo bash ./postinst.sh
sudo systemctl start elasticsearch
'''
sh "$RSYNC -av elasticsearch/deb/confd riajenk@$NODE:/tmp/"
sh "$RSYNC -av elasticsearch/deb/postinst.sh riajenk@$NODE:/tmp/"
sh "$SSH riajenk@${NODE} '$ssh_start_es'"
}
}
......@@ -128,8 +136,9 @@ pipeline {
}
steps {
script {
ELK_HOST = common.host2ip("${NODE}")
ssh_build_cmd = '''
elasticsearchHost="http://localhost:9200"
elasticsearchHost="10.1.15.26:9200"
echo "Elasticsearch health check on ${HOSTNAME}:"
n=0
until [ "$n" -ge 60 ]
......@@ -142,7 +151,6 @@ pipeline {
echo "Elasticsearch did not start on time. Exiting ..."
exit 1
'''
ELK_HOST = common.host2ip("${NODE}")
sh "$SSH riajenk@$ELK_HOST '$ssh_build_cmd'"
}
}
......
# confd template resource: renders elasticsearch.yml.tmpl into the live
# Elasticsearch config. Applied once by postinst.sh (confd -onetime, vault backend).
[template]
src = "elasticsearch.yml.tmpl"
dest = "/etc/elasticsearch/elasticsearch.yml"
# Owner/group read only — the rendered file may contain secrets from Vault.
mode = "0640"
uid = 0
# NOTE(review): numeric gid 114 is presumably the elasticsearch group on the
# target host — numeric gids are host-specific; verify on the deployed VM.
gid = 114
# Vault keys fetched at render time; the prefix is supplied by postinst.sh
# via `-prefix $workspace` (sourced from /etc/vaultcred).
# NOTE(review): "jut" looks like the project's Vault key namespace — confirm.
keys = [
"jut"
]
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
# Before you set out to tweak and tune the configuration, make sure you
# understand what are you trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please consult the documentation for further information on configuration options:
# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
#cluster.name: my-application
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
#node.name: node-1
#
# Add custom attributes to the node:
#
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
path.data: /var/lib/elasticsearch
#
# Path to log files:
#
path.logs: /var/log/elasticsearch
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
# NOTE(review): hardcoded node IP — the deploy pipeline's health check targets
# the same address (elasticsearchHost="10.1.15.26:9200"); if the host changes,
# both places must be updated together.
network.host: 10.1.15.26
#
# Set a custom port for HTTP:
#
#http.port: 9200
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.seed_hosts: ["host1", "host2"]
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
#cluster.initial_master_nodes: ["node-1", "node-2"]
#
# For more information, consult the discovery and cluster formation module documentation.
#
# Standalone deployment — per the Elasticsearch discovery docs, single-node
# mode elects itself master and skips cluster bootstrap; seed_hosts /
# initial_master_nodes above are intentionally left unset.
discovery.type: single-node
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
#gateway.recover_after_nodes: 3
#
# For more information, consult the gateway module documentation.
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
#action.destructive_requires_name: true
#!/bin/bash
# Install the confd configuration shipped to /tmp by the deploy pipeline,
# then render the templates once against Vault. Run as root (sudo) on the
# target host; on failure the operator is pointed at vault_test.sh.

# mkdir -p creates missing parents, so these two calls also create /etc/confd.
mkdir -p /etc/confd/conf.d
mkdir -p /etc/confd/templates
mv /tmp/confd/conf.d/* /etc/confd/conf.d/
mv /tmp/confd/templates/* /etc/confd/templates/

# /etc/vaultcred is expected to define $workspace, $vaulthost and $vaulttoken.
# Expansions are quoted (ShellCheck SC2086) so whitespace in credentials
# cannot split the confd arguments. If sourcing fails, confd is not run and
# the group exits non-zero, which the `if !` below catches.
if ! { source /etc/vaultcred && confd \
        -onetime \
        -prefix "$workspace" \
        -backend vault \
        -node "$vaulthost" \
        -auth-type token \
        -auth-token "$vaulttoken"; }
then
    # Interior lines of the message stay unindented so the emitted text is
    # unchanged; the credential variables expand into the hint on purpose.
    echo "Failed to apply configuration, use vault_test.sh to verify connection
to vault is working. If that is working, check confd templates and
configuration. To run it manually use
source /etc/vaultcred && confd -onetime -prefix $workspace
-backend vault -node $vaulthost -auth-type token
-auth-token $vaulttoken
to finish installation properly use apt-get -f install"
    exit 1
fi
......@@ -7,8 +7,6 @@ chown kibana:kibana /var/log/kibana
mkdir -p /var/run/kibana
chown kibana:kibana /var/run/kibana
mkdir -p /opt/rig
mkdir -p /etc/confd
mkdir -p /etc/confd/conf.d
mkdir -p /etc/confd/templates
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment