
ssh gateway


title: “ssh gateway”
date: 2018-04-17T09:19:35
slug: ssh-gateway


cat .ssh/config

# Other settings you'd like go here

Host muc-deploy
ForwardAgent yes

Host *.ampua.server.lan
ControlMaster auto
ControlPath ~/.ssh/connections/%r_%h_%p
ControlPersist yes
ProxyCommand none
PasswordAuthentication yes
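
Note: ssh does not create the ControlPath directory on its own; multiplexed connections fail if it is missing, so create it once:

mkdir -p ~/.ssh/connections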

# Uncomment these if you want to route through the ssh gateway of a specific datacenter (dc):

# Host *-bs*
# ProxyCommand /usr/bin/ssh ampua-bs-sshgw.ampua.server.lan %h

# Host *-ba*
# ProxyCommand /usr/bin/ssh ampua-bap-sshgw.ampua.server.lan %h

# Host *-lx*
# ProxyCommand /usr/bin/ssh ampua-lxa-sshgw.ampua.server.lan %h

# Exceptions that do not pass through the sshgw (e.g. git, jenkins);
# these need User set and ProxyCommand none:

Host github0*.server.lan github.com git.mamdev.server.lan puppet-repo*
User git
ProxyCommand none
Host ndcli.server.lan 46.165.253.99 35.156.56.25 46.165.253.133 dev.asanger.biz hsp-gmx-pre01.server.lan *cinetic.de logservice* unitix-repo01* reposrv-deb* lxjumper* osum-home-master *.united.domain
User tasanger
ProxyCommand none
Host *
User juxxpd
ProxyCommand /usr/bin/ssh ampua-bs-sshgw.ampua.server.lan %h
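
Since ssh applies the first matching Host entry per option, the exceptions must stay above the catch-all Host * block. With that in place, backend hosts are reachable in a single command; the hostname below is a made-up example:

ssh somehost-bs01.ampua.server.lan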

flood stage disk watermark [95%] exceeded on


title: “flood stage disk watermark [95%] exceeded on”
date: 2018-04-11T09:49:52
slug: flood-stage-disk-watermark-95-exceeded-on


curl -XPUT 'localhost:9200/_cluster/settings?pretty' -H 'Content-Type: application/json' -d'
{
  "transient": {
    "cluster.routing.allocation.disk.watermark.low": "4gb",
    "cluster.routing.allocation.disk.watermark.high": "2gb",
    "cluster.routing.allocation.disk.watermark.flood_stage": "1gb",
    "cluster.info.update.interval": "1m"
  }
}
'
{
  "acknowledged" : true,
  "persistent" : { },
  "transient" : {
    "cluster" : {
      "routing" : {
        "allocation" : {
          "disk" : {
            "watermark" : {
              "low" : "4gb",
              "flood_stage" : "1gb",
              "high" : "2gb"
            }
          }
        }
      }
    },
    "info" : {
      "update" : {
        "interval" : "1m"
      }
    }
  }
}
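
Hitting the flood stage watermark also marks the affected indices read-only; adjusting the watermarks alone does not lift that block. A sketch for clearing it on all indices once disk space is freed again, assuming Elasticsearch 6.x:

curl -XPUT 'localhost:9200/_all/_settings?pretty' -H 'Content-Type: application/json' -d'
{
  "index.blocks.read_only_allow_delete": null
}
'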

create a proxy URL


title: “create a proxy URL”
date: 2018-03-23T15:30:50
slug: create-a-proxy-url


https://173.212.228.153:6443/api/v1/namespaces/gitlab-managed-apps/services/prometheus-prometheus-kube-state-metrics:80/proxy
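
The same service proxy path also works via kubectl proxy, which handles apiserver authentication locally; the trailing /metrics assumes the usual kube-state-metrics endpoint:

kubectl proxy &
curl http://localhost:8001/api/v1/namespaces/gitlab-managed-apps/services/prometheus-prometheus-kube-state-metrics:80/proxy/metrics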

Glusterfs single server


title: “Glusterfs single server”
date: 2018-03-23T13:54:23
slug: glusterfs-single-server


mkdir /data
apt-get install glusterfs-server
gluster volume create k8s_prometheus vmd25840.contaboserver.net:/data/k8s_prometheus force
gluster volume start k8s_prometheus
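
To consume the volume from another machine, mount it with the GlusterFS client; the mount point is an arbitrary choice:

apt-get install glusterfs-client
mkdir -p /mnt/k8s_prometheus
mount -t glusterfs vmd25840.contaboserver.net:/k8s_prometheus /mnt/k8s_prometheus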

OpenVPN for Kubernetes


title: “OpenVPN for Kubernetes”
date: 2018-02-27T13:40:03
slug: openvpn-fur-kubernetes


apiVersion: v1
kind: Service
metadata:
  labels:
    chart: openvpn-2.0.2
    type: openvpn
  name: openvpn
  namespace: default
spec:
  ports:
  - name: openvpn
    nodePort: 30203
    port: 443
    protocol: TCP
    targetPort: 443
  selector:
    app: openvpn
  sessionAffinity: None
  type: LoadBalancer

apiVersion: v1
data:
  configure.sh: |-
    #!/bin/sh
    /etc/openvpn/setup/setup-certs.sh
    iptables -t nat -A POSTROUTING -s 10.240.0.0/255.255.0.0 -o eth0 -j MASQUERADE
    mkdir -p /dev/net
    if [ ! -c /dev/net/tun ]; then
      mknod /dev/net/tun c 10 200
    fi

    if [ "$DEBUG" = "1" ]; then
      echo ========== ${OVPN_CONFIG} ==========
      cat "${OVPN_CONFIG}"
      echo ====================================
    fi
    IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $NF}')
    BASEIP=$(echo $IP | cut -d"." -f1-3)
    NETWORK=$(echo $BASEIP".0")
    DNS=$(cat /etc/resolv.conf | grep -v '^#' | grep nameserver | awk '{print $2}')
    SEARCH=$(cat /etc/resolv.conf | grep -v '^#' | grep search | awk '{$1=""; print $0}')
    cp -f /etc/openvpn/setup/openvpn.conf /etc/openvpn/
    sed 's|OVPN_K8S_SEARCH|'"${SEARCH}"'|' -i /etc/openvpn/openvpn.conf
    sed 's|OVPN_K8S_DNS|'"${DNS}"'|' -i /etc/openvpn/openvpn.conf
    sed 's|NETWORK|'"${NETWORK}"'|' -i /etc/openvpn/openvpn.conf

    openvpn --config /etc/openvpn/openvpn.conf
  newClientCert.sh: |-
    #!/bin/bash
    EASY_RSA_LOC="/etc/openvpn/certs"
    cd $EASY_RSA_LOC
    MY_IP_ADDR="$2"
    ./easyrsa build-client-full $1 nopass
    cat >${EASY_RSA_LOC}/pki/$1.ovpn <<EOF
    <key>
    $(cat ${EASY_RSA_LOC}/pki/private/$1.key)
    </key>
    <cert>
    $(cat ${EASY_RSA_LOC}/pki/issued/$1.crt)
    </cert>
    <ca>
    $(cat ${EASY_RSA_LOC}/pki/ca.crt)
    </ca>
    <dh>
    $(cat ${EASY_RSA_LOC}/pki/dh.pem)
    </dh>
    remote ${MY_IP_ADDR} 443 tcp
    EOF
    cat pki/$1.ovpn
  openvpn.conf: |-
    server 10.240.0.0 255.255.0.0
    verb 3
    key /etc/openvpn/certs/pki/private/server.key
    ca /etc/openvpn/certs/pki/ca.crt
    cert /etc/openvpn/certs/pki/issued/server.crt
    dh /etc/openvpn/certs/pki/dh.pem

    key-direction 0
    keepalive 10 60
    persist-key
    persist-tun

    proto tcp
    port 443
    dev tun0
    status /tmp/openvpn-status.log

    user nobody
    group nogroup

    push "route NETWORK 255.255.240.0"

    push "route 10.0.0.0 255.0.0.0"

    push "dhcp-option DOMAIN OVPN_K8S_SEARCH"
    push "dhcp-option DNS OVPN_K8S_DNS"
  setup-certs.sh: |-
    #!/bin/bash
    EASY_RSA_LOC="/etc/openvpn/certs"
    SERVER_CERT="${EASY_RSA_LOC}/pki/issued/server.crt"
    if [ -e "$SERVER_CERT" ]
    then
      echo "found existing certs - reusing"
    else
      cp -R /usr/share/easy-rsa/* $EASY_RSA_LOC
      cd $EASY_RSA_LOC
      ./easyrsa init-pki
      echo "ca
    " | ./easyrsa build-ca nopass
      ./easyrsa build-server-full server nopass
      ./easyrsa gen-dh
    fi
kind: ConfigMap
metadata:
  name: openvpn
  namespace: default

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  generation: 1
  labels:
    chart: openvpn-2.0.2
    heritage: Tiller
    release: messy-coral
  name: openvpn
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: openvpn
      chart: openvpn-2.0.2
      heritage: Tiller
      release: messy-coral
      type: openvpn
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: openvpn
        chart: openvpn-2.0.2
        heritage: Tiller
        release: messy-coral
        type: openvpn
    spec:
      containers:
      - command:
        - /etc/openvpn/setup/configure.sh
        image: jfelten/openvpn-docker:1.1.0
        imagePullPolicy: IfNotPresent
        name: openvpn
        ports:
        - containerPort: 443
          name: openvpn
          protocol: TCP
        resources:
          limits:
            cpu: 300m
            memory: 128Mi
          requests:
            cpu: 300m
            memory: 128Mi
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /etc/openvpn/setup
          name: openvpn
        - mountPath: /etc/openvpn/certs
          name: certs
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - configMap:
          defaultMode: 509
          name: openvpn
        name: openvpn
      - hostPath:
          path: /etc/openvpn/certs
        name: certs
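
Once the pod is running, client configs can be generated with the newClientCert.sh from the ConfigMap above; the client name and the LoadBalancer IP are placeholders:

POD=$(kubectl get pods -l app=openvpn -o jsonpath='{.items[0].metadata.name}')
kubectl exec -it $POD -- /etc/openvpn/setup/newClientCert.sh my-laptop <EXTERNAL-IP>
kubectl cp $POD:/etc/openvpn/certs/pki/my-laptop.ovpn my-laptop.ovpn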

Install helm


title: “Install helm”
date: 2018-02-27T11:46:54
slug: install-helm


tiller_role.yaml

Repeat for each namespace:

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: tiller
  namespace: tiller
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'

kubectl create ns tiller
kubectl create serviceaccount --namespace tiller tiller
helm init --service-account tiller --tiller-namespace=default
kubectl create rolebinding tiller --role=tiller --namespace=tiller
kubectl create rolebinding tiller --role=tiller --namespace=default
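
To check that Tiller came up with the intended service account (namespaces as chosen above):

kubectl get pods --namespace default -l app=helm,name=tiller
helm version --tiller-namespace default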

K8S Master Isolation


title: “K8S Master Isolation”
date: 2018-02-14T14:40:53
slug: k8s-master-isolation


Master Isolation

By default, your cluster will not schedule pods on the master for security reasons. If you want to be able to schedule pods on the master, e.g. for a single-machine Kubernetes cluster for development, run:

kubectl taint nodes --all node-role.kubernetes.io/master-
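
To restore the default isolation later, re-add the taint; the node name is a placeholder:

kubectl taint nodes <node-name> node-role.kubernetes.io/master:NoSchedule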