Commit b50585e1 authored by J. Fernando Sánchez

k8s deployment

parent bebdaa05
......@@ -2,3 +2,4 @@
.*
*~
bower_components
elasticsearch
[submodule "bower_components/comment-chart"]
path = bower_components/comment-chart
[submodule "comment-chart"]
path = elements/comment-chart
url = ../comment-chart
FROM node:7.10.0
# Install gettext to get envsubst
RUN apt-get update && apt-get install -y gettext
ENV NODE_PATH=/tmp/node_modules APP_NAME=dashboard-reddit
# Install dependencies first to use cache
RUN npm install -g http-server bower
ADD bower.json /usr/src/bower.json
RUN cd /usr/src && \
bower install --allow-root
WORKDIR /usr/src/app/
ADD elements elements
ADD bower.json /usr/src/app/bower.json
RUN bower link --allow-root
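# Install the app's own dependencies and move them one level up, so the bind mount of
# the source tree over /usr/src/app (see docker-compose.yml) does not hide them.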
RUN bower install --allow-root && mv bower_components ..
ADD . /usr/src/app
WORKDIR /usr/src/app/
CMD ["/usr/src/app/init.sh"]
NAME=dashboard-reddit
DEVPORT=8080
# Deployment with Kubernetes
# KUBE_CA_PEM_FILE is the path of a certificate file. It is automatically set by GitLab
# if you enable Kubernetes integration in a project.
#
# As of this writing, Kubernetes integration cannot be set at the group level, so it has
# to be configured manually in every project.
# Alternatively, we use a custom KUBE_CA_BUNDLE environment variable, which can be set
# at the group level. In that case, the variable contains the whole content of the
# certificate, which we dump to a temporary file.
#
# Check whether KUBE_CA_PEM_FILE exists. Otherwise, create it from KUBE_CA_BUNDLE.
KUBE_CA_TEMP=false
ifeq ($(wildcard $(KUBE_CA_PEM_FILE)),)
KUBE_CA_PEM_FILE:="$$PWD/.ca.crt"
CREATED:=$(shell echo -e "$$KUBE_CA_BUNDLE" > $(KUBE_CA_PEM_FILE))
endif
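# For example, with a (hypothetical) group-level variable
#   KUBE_CA_BUNDLE="-----BEGIN CERTIFICATE----- ... -----END CERTIFICATE-----"
# the block above dumps its contents to ./.ca.crt and points KUBE_CA_PEM_FILE at it.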
KUBE_URL=""
KUBE_TOKEN=""
KUBE_NAMESPACE=$(NAME)
KUBECTL=docker run --rm -v $(KUBE_CA_PEM_FILE):/tmp/ca.pem -i lachlanevenson/k8s-kubectl --server="$(KUBE_URL)" --token="$(KUBE_TOKEN)" --certificate-authority="/tmp/ca.pem" -n $(KUBE_NAMESPACE)
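# KUBECTL runs kubectl inside a disposable container, mounting the CA certificate at
# /tmp/ca.pem, so no local kubectl installation is needed.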
CI_PROJECT_NAME=$(NAME)
CI_REGISTRY=docker.io
CI_REGISTRY_USER=gitlab
CI_COMMIT_REF_NAME=master
help: ## Show this help.
@fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/\(.*:\)\s*##\s*\(.*\)/\1\t\t\2\n/'
config: ## Load config from the environment. You should run it once in every session before other tasks. Run: eval $(make config)
@echo ". ../.env || true;"
@awk '{ print "export " $$0}' .env
@echo "# Please, run: "
@echo "# eval \$$(make config)"
# If you need to run a command on the key/value pairs, use this:
# @awk '{ split($$0, a, "="); "echo " a[2] " | base64 -w 0" |& getline b64; print "export " a[1] "=" a[2]; print "export " a[1] "_BASE64=" b64}' .env
deploy: ## Deploy to Kubernetes using the credentials in KUBE_CA_PEM_FILE (or KUBE_CA_BUNDLE) and KUBE_TOKEN
@cat k8s/* | envsubst | $(KUBECTL) apply -f -
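# (The recipe above renders every manifest under k8s/ with envsubst, substituting the
# variables defined in this Makefile and the environment, then pipes the result to
# kubectl apply.)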
deploy-check: ## Get the deployed configuration.
@$(KUBECTL) get deploy,pods,svc,ingress
login: ## Log in to the registry. It is only used on the server, or when running a CI task locally (if CI_BUILD_TOKEN is set).
ifeq ($(CI_BUILD_TOKEN),)
@echo "Not logging in to the docker registry" "$(CI_REGISTRY)"
else
docker login -u gitlab-ci-token -p $(CI_BUILD_TOKEN) $(CI_REGISTRY)
endif
info: ## Print variables. Useful for debugging.
@echo "#KUBERNETES"
@echo KUBE_URL=$(KUBE_URL)
@echo KUBE_CA_PEM_FILE=$(KUBE_CA_PEM_FILE)
@echo KUBE_CA_BUNDLE=$$KUBE_CA_BUNDLE
@echo KUBE_TOKEN=$(KUBE_TOKEN)
@echo KUBE_NAMESPACE=$(KUBE_NAMESPACE)
@echo KUBECTL=$(KUBECTL)
@echo "#CI"
@echo CI_PROJECT_NAME=$(CI_PROJECT_NAME)
@echo CI_REGISTRY=$(CI_REGISTRY)
@echo CI_REGISTRY_USER=$(CI_REGISTRY_USER)
@echo CI_COMMIT_REF_NAME=$(CI_COMMIT_REF_NAME)
#
# For local development
#
run: ## Start all services in the background with docker-compose
docker-compose up -d
stop: ## Stop all docker-compose services
docker-compose stop
build: ## Build all docker images
docker-compose build
push: ## Push all built docker images to the registry
docker-compose push
build-%: ## Build a specific image. For example, to build the 'web' image: make build-web
docker-compose build $*
push-%: ## Push a specific image to the registry. For example, to push the 'web' image: make push-web
docker-compose push $*
ci: ## Run a task locally the way GitLab would run it on the server. For example: make -e action=build ci
gitlab-runner exec shell ${action}
.PHONY: deploy info build push help deploy-check push-% build-% ci
......@@ -18,7 +18,7 @@
"reviews-table": "reviews-table#*",
"number-chart": "number-chart#^1.1.1",
"google-apis": "GoogleWebComponents/google-apis#^1.0.0",
"google-chart-elasticsearch": "google-chart-elasticsearch#^1.1.1",
"google-chart-elasticsearch": "google-chart-elasticsearch#^1.1.3",
"iron-icons": "PolymerElements/iron-icons#^1.0.0",
"paper-icon-button": "PolymerElements/paper-icon-button#^1.0.0",
"polymer": "Polymer/polymer#^1.1.0",
......@@ -28,7 +28,8 @@
"elastic-client": "DigElements/elastic-client#~1.0.1",
"webcomponentsjs": "< 1.0.0",
"spider-chart": "spider-chart#*",
"tweet-chart": "tweet-chart#*"
"tweet-chart": "tweet-chart#*",
"comment-chart": "./elements/comment-chart"
},
"resolutions": {
"webcomponentsjs": "0.7.24",
......
......@@ -21,16 +21,6 @@
url="queries.json"
handle-as="json"
last-response="{{queries}}"></iron-ajax>
<iron-ajax auto
url="/endpoint.json"
handle-as="json"
last-response="{{endpoint}}"></iron-ajax>
<!-- <template is="dom-repeat" items="{{ids}}" as="id">
<iron-ajax auto
url="{{getName(id)}}"
handle-as="json"
on-response="addPlace"></iron-ajax>
</template> -->
<paper-tabs selected="{{selected}}">
<paper-tab>Dashboard</paper-tab>
......@@ -84,12 +74,12 @@
icon='icons:list'
param='{{param}}'
options='{"title": "Comments Subreddits"}'
cols='[{"label": "subredditName", "type": "string"},{"label": "Count", "type": "number"}]'
cols='[{"label": "subreddit", "type": "string"},{"label": "Count", "type": "number"}]'
</google-chart>
</div>
<div class="chart_container" style="left: 0; float: left">
<google-chart
field="subredditName"
field="subreddit"
data="{{data}}"
id='pie-chart3'
extra-id='pie-chart3'
......@@ -239,14 +229,6 @@
}
},
aggs: {
subredditName: {
terms: {
field: "subredditName",
order: {
_count: "desc"
}
}
},
type: {
terms: {
field: "_type",
......@@ -271,16 +253,15 @@
}
}
},
// ,
//emotion: {
// terms: {
// field: "emotion",
// order: {
// _count: "desc"
// }
// }
// }
}
emotion: {
terms: {
field: "emotion",
order: {
_count: "desc"
}
}
}
}
}
}).then(function (resp) {
var myids = []
......@@ -288,7 +269,7 @@
that.ids = myids;
//console.log(that.ids)
that.data = resp;
//console.log(that.data);
console.log(that.data);
});
}
......
<link rel="import" href="../bower_components/polymer/polymer.html">
<link rel="import" href="bower_components/polymer/polymer.html">
<link rel="import" href="../bower_components/elastic-client/elastic-client.html">
<link rel="import" href="../bower_components/dashboard-reddit/dashboard-reddit.html">
<link rel="import" href="bower_components/elastic-client/elastic-client.html">
<link rel="import" href="bower_components/dashboard-reddit/dashboard-reddit.html">
<html>
<head>
<script src="bower_components/webcomponentsjs/webcomponents-lite.js"></script>
<link rel="import" href="imports.html"></link>
<style>
.info-box-icon.number-chart iron-icon.number-chart {
top: 26%;
}
</style>
</head>
<body>
<template is="dom-bind">
<elastic-client
config='{"host": "${ES_ENDPOINT_EXTERNAL}"}'
client="{{client}}"
cluster-status="{{myStatus}}">
</elastic-client>
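<!-- ${ES_ENDPOINT_EXTERNAL} is filled in at container startup, presumably by envsubst
     in init.sh (the Dockerfile installs gettext for exactly this), with the externally
     reachable Elasticsearch URL. -->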
<dashboard-reddit
client="{{client}}"></dashboard-reddit>
<!-- <button id="databutton" onclick="changedata()">Click to change data</button> -->
<script>
var datas =[
{"hits": {
"total": 20000
},
"aggregations": {
"category": {
"buckets": [
{"key": "myObject", "doc_count": 3000},
{"key": "otherObject", "doc_count": 1000}
]
}
}
},
{"hits": {
"total": 30000
},
"aggregations": {
"category": {
"buckets": [
{"key": "myObject", "doc_count": 1000},
{"key": "otherObject", "doc_count": 4000}
]
}
}
}];
var numdata = 0;
// var nc1 = document.getElementById('demo-chart1');
// var nc2 = document.getElementById('demo-chart2');
// nc1.data = nc2.data = datas[0];
// function changedata(){
//   numdata += 1;
//   nc1.data = nc2.data = datas[numdata%2];
// }
</script>
</template>
</body>
</html>
<html>
<head>
<script src="../bower_components/webcomponentsjs/webcomponents-lite.js"></script>
<script src="bower_components/webcomponentsjs/webcomponents-lite.js"></script>
<link rel="import" href="imports.html"></link>
<style>
.info-box-icon.number-chart iron-icon.number-chart {
......@@ -14,7 +14,7 @@
<template is="dom-bind">
<elastic-client
config='{"host": "http://localhost:9200"}'
config='{"host": "localhost:9200"}'
client="{{client}}"
cluster-status="{{myStatus}}">
</elastic-client>
......
......@@ -3,33 +3,28 @@ version: '2'
services:
sefarad:
build: .
image: registry.cluster.gsi.dit.upm.es/sefarad/dashboard-reddit
ports:
- "8080:8080"
volumes:
- .:/usr/src/app
networks:
- sefarad-network
depends_on:
- elasticsearch
elasticsearch:
image: elasticsearch
ports:
- "9200:9200"
- "9300:9300"
image: "docker.elastic.co/elasticsearch/elasticsearch:5.5.2"
ulimits:
memlock:
soft: -1
hard: -1
environment:
- cluster.name=docker-cluster
- bootstrap.memory_lock=false
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
- "xpack.security.enabled=false"
- "http.cors.enabled=true"
- 'http.cors.allow-origin=*'
volumes:
- ./elasticsearch/nodes:/usr/share/elasticsearch/data/nodes
- ./elasticsearch/config:/usr/share/elasticsearch/config
networks:
- sefarad-network
luigi:
build:
context: luigi/
volumes:
- ./luigi:/usr/src/app
networks:
- sefarad-network
- esdata:/usr/share/elasticsearch/data/
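# esdata is a named volume (declared at the bottom of this file) that persists the
# Elasticsearch indices across container restarts.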
networks:
sefarad-network:
driver: bridge
\ No newline at end of file
ports:
- 9200:9200
volumes:
esdata:
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
# Before you set out to tweak and tune the configuration, make sure you
# understand what you are trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please see the documentation for further information on configuration options:
# <https://www.elastic.co/guide/en/elasticsearch/reference/5.0/settings.html>
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
cluster.name: sefarad
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
#node.name: node-1
#
# Add custom attributes to the node:
#
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
#path.data: /path/to/data
#
# Path to log files:
#
#path.logs: /path/to/logs
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
#bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
#network.host: 192.168.0.1
#
# Set a custom port for HTTP:
#
#http.port: 9200
#
# For more information, see the documentation at:
# <https://www.elastic.co/guide/en/elasticsearch/reference/5.0/modules-network.html>
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.zen.ping.unicast.hosts: ["host1", "host2"]
#
# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1):
#
#discovery.zen.minimum_master_nodes: 3
#
# For more information, see the documentation at:
# <https://www.elastic.co/guide/en/elasticsearch/reference/5.0/modules-discovery-zen.html>
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
#gateway.recover_after_nodes: 3
#
# For more information, see the documentation at:
# <https://www.elastic.co/guide/en/elasticsearch/reference/5.0/modules-gateway.html>
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
#action.destructive_requires_name: true
http.host: 0.0.0.0
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-methods: OPTIONS, HEAD, GET, POST, PUT, DELETE
http.cors.allow-headers: X-Requested-With, X-Auth-Token, Content-Type, Content-Length
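# CORS is opened up so that the browser-based dashboard can query Elasticsearch
# directly from the client side.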
## JVM configuration
################################################################
## IMPORTANT: JVM heap size
################################################################
##
## You should always set the min and max JVM heap
## size to the same value. For example, to set
## the heap to 4 GB, set:
##
## -Xms4g
## -Xmx4g
##
## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
## for more information
##
################################################################
# Xms represents the initial size of total heap space
# Xmx represents the maximum size of total heap space
-Xms2g
-Xmx2g
################################################################
## Expert settings
################################################################
##
## All settings below this section are considered
## expert settings. Don't tamper with them unless
## you understand what you are doing
##
################################################################
## GC configuration
-XX:+UseConcMarkSweepGC
-XX:CMSInitiatingOccupancyFraction=75
-XX:+UseCMSInitiatingOccupancyOnly
## optimizations
# disable calls to System#gc
-XX:+DisableExplicitGC
# pre-touch memory pages used by the JVM during initialization
-XX:+AlwaysPreTouch
## basic
# force the server VM
-server
# set to headless, just in case
-Djava.awt.headless=true
# ensure UTF-8 encoding by default (e.g. filenames)
-Dfile.encoding=UTF-8
# use our provided JNA always versus the system one
-Djna.nosys=true
# flags to keep Netty from being unsafe
-Dio.netty.noUnsafe=true
-Dio.netty.noKeySetOptimization=true
# log4j 2
-Dlog4j.shutdownHookEnabled=false
-Dlog4j2.disable.jmx=true
-Dlog4j.skipJansi=true
## heap dumps
# generate a heap dump when an allocation from the Java heap fails
# heap dumps are created in the working directory of the JVM
-XX:+HeapDumpOnOutOfMemoryError
# specify an alternative path for heap dumps
# ensure the directory exists and has sufficient space
#-XX:HeapDumpPath=${heap.dump.path}
## GC logging
#-XX:+PrintGCDetails
#-XX:+PrintGCTimeStamps
#-XX:+PrintGCDateStamps
#-XX:+PrintClassHistogram
#-XX:+PrintTenuringDistribution
#-XX:+PrintGCApplicationStoppedTime
# log GC status to a file with time stamps
# ensure the directory exists
#-Xloggc:${loggc}
# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
# If documents were already indexed with unquoted fields in a previous version
# of Elasticsearch, some operations may throw errors.
#
# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
# only for migration purposes.
#-Delasticsearch.json.allow_unquoted_field_names=true
status = error
# log action execution errors for easier debugging
logger.action.name = org.elasticsearch.action
logger.action.level = debug
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs}.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.rolling.ref = rolling
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4
logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = warn
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.additivity = false
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
appender.index_search_slowlog_rolling.layout.type = PatternLayout
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1
appender.index_search_slowlog_rolling.policies.time.modulate = true