author     Kevin Hilman <khilman@baylibre.com>    2019-06-04 14:34:56 -0700
committer  Kevin Hilman <khilman@baylibre.com>    2019-06-04 14:34:56 -0700
commit     b9797e144c8434654c556477f6f8af28b377ef2d (patch)
tree       7913f3b1af5408d478c5ecb5400831b5c25150e2
parent     c70f91d51e4d7defded0faaa448b322872f31a1b (diff)
parent     221d663ea966445931530ab79e9fa97907ca7960 (diff)
Merge branch 'healtcheck-hosting' of https://github.com/montjoie/lava-docker
* 'healtcheck-hosting' of https://github.com/montjoie/lava-docker: Permit to host healtchecks
-rw-r--r--  README.md               20
-rw-r--r--  healthcheck/Dockerfile  13
-rw-r--r--  healthcheck/port.conf    6
-rw-r--r--  lava-master/Dockerfile   1
-rwxr-xr-x  lavalab-gen.py          14
5 files changed, 52 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 942fc99..5b8b361 100644
--- a/README.md
+++ b/README.md
@@ -222,6 +222,7 @@ masters:
lava-coordinator: Does the master should ran a lava-coordinator and export its port
persistent_db: True/False (default False) Is the postgres DB is persistent over reboot
http_fqdn: The FQDN used to access the LAVA web interface. This is necessary if you use https otherwise you will issue CSRF errors.
+ healthcheck_url: Base URL where the healthcheck files are hosted. See "How to host healthchecks" below
allowed_hosts: A list of FQDN used to access the LAVA master
- "fqdn1"
- "fqdn2"
@@ -269,6 +270,7 @@ slaves:
use_nfs: Does the LAVA dispatcher will run NFS jobs
use_tap: Does TAP netdevices could be used
arch: The arch of the worker (if not x86_64), only accept arm64
+ host_healthcheck: If true, run the optional healthcheck container on this slave. See "How to host healthchecks" below
lava-coordinator: Does the slave should ran a lava-coordinator
expose_ser2net: Do ser2net ports need to be available on host
expose_ports: Expose port p1 on the host to p2 on the worker slave.
@@ -451,6 +453,24 @@ Example for an upsquare and a dispatcher available at 192.168.66.1:
}
```
+## How to host healthchecks
+Healthcheck jobs need external resources (rootfs, images, etc.).
+By default, lava-docker healthchecks use files hosted on our GitHub, which implies external network access and some bandwidth usage.
+To host the healthcheck files locally, set host_healthcheck on a slave.
+Note that doing so brings some constraints:
+- Since healthcheck job definitions are stored on the master, the healthcheck hostname must be the same across all slaves.
+- You need to set the base URL on the master via healthcheck_url.
+- If you have QEMU devices, since they run inside the docker network, which provides internal DNS, you probably must use the container name ("healthcheck") as the hostname.
+- For a simple setup, you can use the slave IP as healthcheck_url.
+- For a more complex setup (slaves spread across different sites with different network subnets), you need to set up a DNS server so that the same hostname resolves at all sites.
+
+To set up a DNS server, the easiest way is to use dnsmasq and add a "healthcheck <ip-address-of-the-slave>" entry to /etc/hosts.
+
+Example:
+One master and one slave in DC A, and one slave in DC B.
+Both slaves need host_healthcheck set to true, and the master needs healthcheck_url set to healthcheck:8080.
+You have to add a DNS server at both sites with a healthcheck entry.
+
## Bugs, Contact
The prefered way to submit bugs are via the github issue tracker
You can also contact us on #lava-docker on the freenode IRC network
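For context, here is a minimal boards.yaml sketch combining the two keywords this patch introduces. Only healthcheck_url and host_healthcheck come from this change; the names, the host values, and the http:// scheme are illustrative assumptions:

```yaml
masters:
  - name: master
    host: dc-a
    # Assumption: a full URL including the scheme, pointing at the
    # healthcheck container name and the port it publishes (8080).
    healthcheck_url: http://healthcheck:8080
slaves:
  - name: slave-dc-a
    host: dc-a
    host_healthcheck: true
  - name: slave-dc-b
    host: dc-b
    host_healthcheck: true
```

With host_healthcheck set on both slaves, lavalab-gen.py (see below) adds a healthcheck service to each slave host's docker-compose file, and the master's image build rewrites the stock health-check URLs to the configured base (see lava-master/Dockerfile below).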
diff --git a/healthcheck/Dockerfile b/healthcheck/Dockerfile
new file mode 100644
index 0000000..b34fb79
--- /dev/null
+++ b/healthcheck/Dockerfile
@@ -0,0 +1,13 @@
+FROM bitnami/minideb:stretch
+
+RUN apt-get update && apt-get -y install git
+RUN git clone https://github.com/BayLibre/lava-healthchecks-binary.git
+
+FROM nginx:mainline-alpine
+
+COPY port.conf /etc/nginx/conf.d/
+
+COPY --from=0 /lava-healthchecks-binary/mainline /usr/share/nginx/html/mainline/
+COPY --from=0 /lava-healthchecks-binary/images /usr/share/nginx/html/images/
+COPY --from=0 /lava-healthchecks-binary/next /usr/share/nginx/html/next/
+COPY --from=0 /lava-healthchecks-binary/stable /usr/share/nginx/html/stable/
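A note on the layout above: this is a two-stage build. The first, unnamed stage (referenced by index as --from=0) exists only to git-clone the published health-check binaries; the final image is plain nginx:mainline-alpine with the cloned payload copied in, so git and the repository history never reach the image that is actually deployed.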
diff --git a/healthcheck/port.conf b/healthcheck/port.conf
new file mode 100644
index 0000000..60c72f3
--- /dev/null
+++ b/healthcheck/port.conf
@@ -0,0 +1,6 @@
+# On docker, port 80 cannot be exposed since the lava-slave container already exposes it,
+# so port 8080 is exposed instead.
+server {
+ listen 8080;
+ root /usr/share/nginx/html/;
+}
diff --git a/lava-master/Dockerfile b/lava-master/Dockerfile
index 9b56be8..b018a2d 100644
--- a/lava-master/Dockerfile
+++ b/lava-master/Dockerfile
@@ -9,6 +9,7 @@ COPY default/* /etc/default/
RUN git clone https://github.com/BayLibre/lava-healthchecks.git
RUN cp lava-healthchecks/health-checks/* /etc/lava-server/dispatcher-config/health-checks/
COPY health-checks/* /etc/lava-server/dispatcher-config/health-checks/
+RUN if [ -e /etc/lava-server/dispatcher-config/health-checks/healthcheck_url ];then sed -i "s,http.*blob/master,$(cat /etc/lava-server/dispatcher-config/health-checks/healthcheck_url)," /etc/lava-server/dispatcher-config/health-checks/* && sed -i 's,?.*$,,' /etc/lava-server/dispatcher-config/health-checks/* ;fi
RUN chown -R lavaserver:lavaserver /etc/lava-server/dispatcher-config/health-checks/
COPY devices/ /root/devices/
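The RUN line added above rewrites the stock health-check jobs at image build time: lavalab-gen.py (below) writes the configured healthcheck_url into health-checks/healthcheck_url inside the build context, the first sed swaps the GitHub prefix for that URL, and the second strips the trailing query string. A sketch of the resulting transformation, with a hypothetical file name and assuming healthcheck_url is http://healthcheck:8080:

```
# before (stock health-check job, hypothetical artifact name):
https://github.com/BayLibre/lava-healthchecks-binary/blob/master/images/zImage?raw=true
# after both sed passes:
http://healthcheck:8080/images/zImage
```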
diff --git a/lavalab-gen.py b/lavalab-gen.py
index 6332284..9015e48 100755
--- a/lavalab-gen.py
+++ b/lavalab-gen.py
@@ -105,7 +105,7 @@ def main():
else:
masters = workers["masters"]
for master in masters:
- keywords_master = [ "name", "type", "host", "users", "groups", "tokens", "webadmin_https", "persistent_db", "zmq_auth", "zmq_auth_key", "zmq_auth_key_secret", "http_fqdn", "slave_keys", "slaveenv", "loglevel", "allowed_hosts", "lava-coordinator" ]
+ keywords_master = [ "name", "type", "host", "users", "groups", "tokens", "webadmin_https", "persistent_db", "zmq_auth", "zmq_auth_key", "zmq_auth_key_secret", "http_fqdn", "slave_keys", "slaveenv", "loglevel", "allowed_hosts", "lava-coordinator", "healthcheck_url" ]
for keyword in master:
if not keyword in keywords_master:
print("WARNING: unknown keyword %s" % keyword)
@@ -151,6 +151,10 @@ def main():
groupdir = "%s/groups" % workerdir
os.mkdir(groupdir)
worker = master
+ if "healthcheck_url" in master:
+ f_hc = open("%s/health-checks/healthcheck_url" % workerdir, 'w')
+ f_hc.write(master["healthcheck_url"])
+ f_hc.close()
webadmin_https = False
if "webadmin_https" in worker:
webadmin_https = worker["webadmin_https"]
@@ -292,7 +296,7 @@ def main():
else:
slaves = workers["slaves"]
for slave in slaves:
- keywords_slaves = [ "name", "host", "dispatcher_ip", "remote_user", "remote_master", "remote_address", "remote_rpc_port", "remote_proto", "extra_actions", "zmq_auth_key", "zmq_auth_key_secret", "default_slave", "export_ser2net", "expose_ser2net", "remote_user_token", "zmq_auth_master_key", "expose_ports", "env", "bind_dev", "loglevel", "use_nfs", "arch", "devices", "lava-coordinator", "use_tap" ]
+ keywords_slaves = [ "name", "host", "dispatcher_ip", "remote_user", "remote_master", "remote_address", "remote_rpc_port", "remote_proto", "extra_actions", "zmq_auth_key", "zmq_auth_key_secret", "default_slave", "export_ser2net", "expose_ser2net", "remote_user_token", "zmq_auth_master_key", "expose_ports", "env", "bind_dev", "loglevel", "use_nfs", "arch", "devices", "lava-coordinator", "use_tap", "host_healthcheck" ]
for keyword in slave:
if not keyword in keywords_slaves:
print("WARNING: unknown keyword %s" % keyword)
@@ -433,6 +437,12 @@ def main():
dockcomp_add_device(dockcomp, worker_name, "/dev/net/tun:/dev/net/tun")
dockcomp["services"][worker_name]["cap_add"] = []
dockcomp["services"][worker_name]["cap_add"].append("NET_ADMIN")
+ if "host_healthcheck" in worker and worker["host_healthcheck"]:
+ dockcomp["services"]["healthcheck"] = {}
+ dockcomp["services"]["healthcheck"]["ports"] = ["8080:8080"]
+ dockcomp["services"]["healthcheck"]["build"] = {}
+ dockcomp["services"]["healthcheck"]["build"]["context"] = "healthcheck"
+ shutil.copytree("healthcheck", "output/%s/healthcheck" % host)
if "extra_actions" in worker:
fp = open("%s/scripts/extra_actions" % workerdir, "w")
for eaction in worker["extra_actions"]:
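For reference, the healthcheck service generated by the host_healthcheck block above should end up in the host's docker-compose file looking roughly like this (surrounding services omitted):

```yaml
services:
  healthcheck:
    ports:
      - "8080:8080"
    build:
      context: healthcheck
```

The shutil.copytree() call copies the healthcheck/ build context (the Dockerfile and port.conf above) into output/<host>/ next to the generated compose file, so docker-compose can build the image locally.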