Category Archives: Kubernetes

Update Dashboard Password


title: “Update Dashboard Password”
date: 2020-04-12T07:22:41
slug: update-dashboard-password


[root@rook-ceph-operator-9bd79cdcf-npkm8 /]# ceph -c /var/lib/rook/rook-ceph/rook-ceph.config dashboard set-login-credentials admin admin
******************************************************************
*** WARNING: this command is deprecated. ***
*** Please use the ac-user-* related commands to manage users. ***
******************************************************************
Username and password updated
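Since set-login-credentials is deprecated, the newer ac-user-* commands can be used instead. A minimal sketch, run in the same operator/toolbox pod; the exact flags depend on the Ceph release (newer versions only accept the password from a file via -i):

# Older releases accept the password inline
ceph -c /var/lib/rook/rook-ceph/rook-ceph.config dashboard ac-user-set-password admin admin
# Newer releases require reading the password from a file
echo -n 'admin' > /tmp/dashboard-pw.txt
ceph -c /var/lib/rook/rook-ceph/rook-ceph.config dashboard ac-user-set-password admin -i /tmp/dashboard-pw.txt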

Backup and Restore ETCD


title: “Backup and Restore ETCD”
date: 2020-04-06T13:27:03
slug: backup-and-restore-etcd


Get etcdctl Tool:
https://github.com/etcd-io/etcd/releases/download/v3.4.7/etcd-v3.4.7-linux-amd64.tar.gz

Create a Snapshot

ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:2379 \
 --cacert=/etc/kubernetes/pki/etcd/ca.crt \
 --cert=/etc/kubernetes/pki/etcd/server.crt \
 --key=/etc/kubernetes/pki/etcd/server.key \
 snapshot save /tmp/snapshot-pre-boot.db

Check the status of the snapshot:

ETCDCTL_API=3 etcdctl snapshot status /tmp/snapshot-pre-boot.db -w table

Restore the ETCD snapshot to a new folder

ETCDCTL_API=3 etcdctl snapshot restore -h

ETCDCTL_API=3 etcdctl \
 --endpoints=https://[127.0.0.1]:2379 \
 --cacert=/etc/kubernetes/pki/etcd/ca.crt \
 --name=master \
 --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key \
 --data-dir /var/lib/etcd-from-backup \
 --initial-cluster=master=https://127.0.0.1:2380 \
 --initial-cluster-token etcd-cluster-1 \
 --initial-advertise-peer-urls=https://127.0.0.1:2380 \
 snapshot restore /tmp/snapshot-pre-boot.db

Modify /etc/kubernetes/manifests/etcd.yaml:

- --data-dir=/var/lib/etcd-from-backup
- --initial-cluster-token=etcd-cluster-1
- mountPath: /var/lib/etcd-from-backup
  path: /var/lib/etcd-from-backup

spec:
  containers:
  - command:
    - etcd
    - --advertise-client-urls=https://172.17.0.45:2379
    - --cert-file=/etc/kubernetes/pki/etcd/server.crt
    - --client-cert-auth=true
    - --data-dir=/var/lib/etcd-from-backup
    - --initial-cluster-token=etcd-cluster-1
    - --initial-advertise-peer-urls=https://172.17.0.45:2380
    - --initial-cluster=master=https://172.17.0.45:2380
    - --key-file=/etc/kubernetes/pki/etcd/server.key
    - --listen-client-urls=https://127.0.0.1:2379,https://172.17.0.45:2379
    - --listen-metrics-urls=http://127.0.0.1:2381
    - --listen-peer-urls=https://172.17.0.45:2380
    - --name=master
    - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
    - --peer-client-cert-auth=true
    - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
    - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    - --snapshot-count=10000
    - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    ...
    volumeMounts:
    - mountPath: /var/lib/etcd-from-backup
      name: etcd-data
    - mountPath: /etc/kubernetes/pki/etcd
      name: etcd-certs
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/pki/etcd
      type: DirectoryOrCreate
    name: etcd-certs
  - hostPath:
      path: /var/lib/etcd-from-backup
      type: DirectoryOrCreate
    name: etcd-data
status: {}
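After saving the edited manifest, the kubelet recreates the etcd static pod with the restored data directory. A quick verification sketch, assuming a Docker-based kubeadm master:

# Watch the etcd static pod come back up
docker ps -a | grep etcd
kubectl -n kube-system get pods
# Confirm the restored cluster state is visible again
kubectl get deployments --all-namespaces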

Kubernetes Metrics Server


title: “Kubernetes Metrics Server”
date: 2020-04-05T12:55:07
slug: kubernetes-metrics-server


git clone https://github.com/kubernetes-incubator/metrics-server.git
kubectl create -f .
kubectl top node
kubectl top pod

For x509 errors:

spec:
  containers:
  - args:
    - --cert-dir=/tmp
    - --secure-port=4443
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --kubelet-use-node-status-port
    - --kubelet-insecure-tls
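These args belong to the metrics-server container; a sketch of applying them by editing the Deployment, assuming it is named metrics-server in kube-system:

kubectl -n kube-system edit deployment metrics-server
# after the pod restarts, re-check
kubectl top node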

Create Certificate Request


title: “Create Certificate Request”
date: 2020-04-04T13:18:52
slug: 1207-2


apiVersion: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
metadata:
  name: akshay
spec:
  groups:
  - system:authenticated
  request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZqQ0NBVDRDQVFBd0VURVBNQTBHQTFVRUF3d0dZV3R6YUdGNU1JSUJJakFOQmdrcWhraUc5dzBCQVFFRgpBQU9DQVE4QU1JSUJDZ0tDQVFFQXgrVkQ4MjJVZEhXMzhMTVc3S0xxVlBPaHZqaUFmV0paVjUwa0tDTEZQSnIzCnJTUEVPM0JNUFAwcnI5QU16RDNFaXpoOUtTNndzQjRjbHk4NUtzOWxwT2puMkhCODhlTVRYOEhBWUdmTjA0V2cKL3g4Z2MrMWw3QzNhT3JnbUNNaTVOYzVqVVhPQ01pTldkcGRBbWk4MHNXRy9BVUJuYnpqdjNvaGE5ODZvdlhMUApJQ29tSFNuQjBkVkE1ZmNyWWlZN21IZHV4ZlBBbFBFUXA4allaTEV4MEZ0cDBnMWJWTkRybEY5Zm5SQVBFbzlZClNLcjBmdEJpYUpWejJSSTl2YUR5QTlqYU5vbGN4OVdiTjBqUVR5eFhyc0xHMVVReFdvdk5WejM2TFNXd2NYc3EKQnhmWEk3NVFXeXBDYnpmNitVaG8wRk41TkptNDhmMXNwcWdKSVhFd1Z3SURBUUFCb0FBd0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBRXFnclN5UTZNRUM4dnc1ekp1YU5ScVo3NEhzK0htNlJxOG1sSnBCdmg4ck9KYnp5cjFTCm1LZUZqMVNlcVF2Q2s1czJld0tBYUJrczRwVHNRSWlNa2FOZXgweE9yUTVUQTB0RHlrZUpZcjRPalA5cmVRRDUKeUFKalp0MnVYeFNmY0wwdkM0WTNZQlRqUWd3U3FCQXJlUzgvWjFWT3draHVpbmtuZU1Oa1U3RlJkQ1hCcHh0ZQpDRE1naXlYa3lpbVh5U3RVZVhFNDJweUlJQjgyeTRNYjQrYURyOGR6MkhCRmpzcE10RVJOaUtrTkFCLzErc3hOCnA5MDZaQTNsRmdVRENNUjdVY29VSUMvTjN4SUExVDM5cTBFZzI4RENTYzNMWGZWTFlsL3NSeVdvdVd6eUFwZTMKcldFSDRhREltTTdXRjNGYjFHWlpUSHlBRElvZXR1cGdNdGc9Ci0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo=
  usages:
  - digital signature
  - key encipherment
  - server auth
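The request field is the base64-encoded CSR. A sketch of generating, approving, and extracting the signed certificate; the file names are assumptions:

# Generate the key/CSR and base64-encode it for the manifest
openssl genrsa -out akshay.key 2048
openssl req -new -key akshay.key -subj "/CN=akshay" -out akshay.csr
cat akshay.csr | base64 | tr -d '\n'

# Create, approve and fetch the signed certificate
kubectl create -f akshay-csr.yaml
kubectl get csr
kubectl certificate approve akshay
kubectl get csr akshay -o jsonpath='{.status.certificate}' | base64 -d > akshay.crt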

Authentication with Password File


title: “Authentication with Password File”
date: 2020-04-04T12:30:30
slug: authentication-with-password-file


/tmp/users/user-details.csv

# User File Contents
password123,user1,u0001
password123,user2,u0002
password123,user3,u0003
password123,user4,u0004
password123,user5,u0005

/etc/kubernetes/manifests/kube-apiserver.yaml

apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --basic-auth-file=/tmp/users/user-details.csv
    image: k8s.gcr.io/kube-apiserver-amd64:v1.11.3
    name: kube-apiserver
    volumeMounts:
    - mountPath: /tmp/users
      name: usr-details
      readOnly: true
  volumes:
  - hostPath:
      path: /tmp/users
      type: DirectoryOrCreate
    name: usr-details

Create the necessary roles and role bindings for these users:

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default
  name: pod-reader
rules:
- apiGroups: [""] # "" indicates the core API group
  resources: ["pods"]
  verbs: ["get", "watch", "list"]

---
# This role binding allows "user1" to read pods in the "default" namespace.
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: read-pods
  namespace: default
subjects:
- kind: User
  name: user1 # Name is case sensitive
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role # this must be Role or ClusterRole
  name: pod-reader # this must match the name of the Role or ClusterRole you wish to bind to
  apiGroup: rbac.authorization.k8s.io

Once created, you can authenticate to the kube-apiserver using the user's credentials:

curl -v -k https://localhost:6443/api/v1/pods -u "user1:password123"
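The same credentials can also be wired into kubectl. A sketch for older clusters (basic auth support was removed from the apiserver in Kubernetes 1.19); the context and cluster names are placeholders:

kubectl config set-credentials user1 --username=user1 --password=password123
kubectl config set-context user1-context --cluster=kubernetes --user=user1
kubectl --context=user1-context get pods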

Sample Vault injection


title: “Sample Vault injection”
date: 2020-02-17T09:23:42
slug: sample-vault-injection


helm delete vault-helm
kubectl delete pvc data-vault-helm-0
kubectl delete -f pv.yaml
kubectl delete -f app.yaml
kubectl delete -f app_secrets.yaml
rm -rf /STORAGE/vault/*

kubectl config set-context --current --namespace=default
kubectl create -f pv.yaml
helm install vault-helm vault-helm
kubectl get pods
kubectl logs vault-helm-0
kubectl exec -it vault-helm-0 sh

vault operator init
vault operator unseal
vault operator unseal
vault operator unseal
vault login
vault secrets enable -path="secret" kv

cat <<EOF > /home/vault/app-policy.hcl
path "secret\*" {
 capabilities = ["read"]
}
EOF

vault policy write app /home/vault/app-policy.hcl

vault auth enable kubernetes

vault write auth/kubernetes/config \
 token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
 kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \
 kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt

vault write auth/kubernetes/role/myapp \
 bound_service_account_names=app \
 bound_service_account_namespaces=default \
 policies=app \
 ttl=1h

vault kv put secret/helloworld username=foobaruser password=foobarbazpass

kubectl create -f app.yaml
kubectl get pods
kubectl delete -f app.yaml
kubectl create -f app_secrets.yaml
kubectl get pods
cat /vault/secrets/helloworld
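app.yaml and app_secrets.yaml are not shown here; a minimal sketch of what the annotated variant (app_secrets.yaml) might look like, assuming the Vault agent injector from the Helm chart is running. The Deployment name, labels, and image are placeholders; the service account, role, and secret path match the commands above:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: app
spec:
  selector:
    matchLabels:
      app: app
  template:
    metadata:
      labels:
        app: app
      annotations:
        vault.hashicorp.com/agent-inject: "true"
        vault.hashicorp.com/role: "myapp"
        vault.hashicorp.com/agent-inject-secret-helloworld: "secret/helloworld"
    spec:
      serviceAccountName: app
      containers:
      - name: app
        image: jweissig/app:0.0.1 # placeholder demo image

With the annotations in place, the injected secret shows up under /vault/secrets/helloworld inside the pod, which is what the cat command above reads.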

Unseal Vault


title: “Unseal Vault”
date: 2020-02-15T19:02:02
slug: unseal-vault


https://www.vaultproject.io/intro/getting-started/

$ vault operator init

Unseal Key 1: 4jYbl2CBIv6SpkKj6Hos9iD32k5RfGkLzlosrrq/JgOm
Unseal Key 2: B05G1DRtfYckFV5BbdBvXq0wkK5HFqB9g2jcDmNfTQiS
Unseal Key 3: Arig0N9rN9ezkTRo7qTB7gsIZDaonOcc53EHo83F5chA
Unseal Key 4: 0cZE0C/gEk3YHaKjIWxhyyfs8REhqkRW/CSXTnmTilv+
Unseal Key 5: fYhZOseRgzxmJCmIqUdxEm9C3jB5Q27AowER9w4FC2Ck

Initial Root Token: s.KkNJYWF5g0pomcCLEmDdOVCW

Vault initialized with 5 key shares and a key threshold of 3. Please securely
distribute the key shares printed above. When the Vault is re-sealed,
restarted, or stopped, you must supply at least 3 of these keys to unseal it
before it can start servicing requests.

Vault does not store the generated master key. Without at least 3 key to
reconstruct the master key, Vault will remain permanently sealed!

It is possible to generate new unseal keys, provided you have a quorum of
existing unseal keys shares. See "vault operator rekey" for more information.

vault operator unseal
vault operator unseal
vault operator unseal
export VAULT_ADDR='https://127.0.0.1:8200'
vault login

vault secrets enable -path="vw-rv-cod" kv # kv = type of key-value
vault secrets list
vault kv put vw-rv-cod/k8s/apps/hgw db\_pass=asvkjfdsfhjisvh
# Alle Passwörter unter dem Pfad vw-rv-cod/k8s/apps/hgw
vault kv get vw-rv-cod/k8s/apps/hgw
vault kv get -field=db\_pass vw-rv-cod/k8s/apps/hgw
# Ausgabe im json Format
vault kv get -format=json vw-rv-cod/k8s/apps/hgw
# Anschließend Json decoded
vault kv get -format=json vw-rv-cod/k8s/apps/hgw | jq -r .data.data.db\_pass
# Zwei Passwort Einträge
vault kv put vw-rv-cod/k8s/apps/hgw db\_user=asvkjfdsfhj db\_pass=asvkjfdsfhjisvh
# Passwort löschen
vault kv delete vw-rv-cod/k8s/apps/hgw

Policy

vault policy list

# Create Policy File
cat <<EOF > my-policy.hcl
path "vw-rv-cod/\*" {
 capabilities = ["read"]
}
EOF

# Check Syntax of policy file
vault policy fmt my-policy.hcl

# Write Policy
vault policy write my-policy my-policy.hcl
# Show Policy
vault policy read my-policy

# Create a User Token for the policy
vault token create -policy=my-policy

# Login with the created Token
vault login s.FyivwzB9Z4ZYRseD7hwQxNsd

Generate Password

< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c${1:-32};echo;

Read Secret via Curl

curl \
 -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
 -X GET \
 http://127.0.0.1:8200/v1/secret/foo
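The corresponding write through the HTTP API can be sketched like this; a KV version 1 engine is assumed and the token and payload are placeholders:

curl \
 -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
 -X POST \
 -d '{"value": "bar"}' \
 http://127.0.0.1:8200/v1/secret/foo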

Renew K8S Certificates


title: “Renew K8S Certificates”
date: 2019-11-20T07:44:45
slug: k8s-zertifikate-erneuern


On Kubernetes master node:

  1. Back up the old certificates:

```
# mkdir -p /root/kube-backup/kubernetes-pki /root/kube-backup/kubernetes-conf /root/kube-backup/kubelet-pki
mv /etc/kubernetes/pki/* /root/kube-backup/kubernetes-pki/
mv /etc/kubernetes/*.conf /root/kube-backup/kubernetes-conf/
```

  2. Renew the certificates and kubeconfig files of the core services:

```
# K8S_IP=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | cut -d/ -f3 | cut -d: -f1)
kubeadm alpha phase certs all --apiserver-advertise-address $K8S_IP
kubeadm alpha phase kubeconfig all --apiserver-advertise-address $K8S_IP
```

For installations behind a proxy, pass the proxy as environment variables in front of the kubeadm command:

```
# http_proxy=http://192.168.10.12:8008 https_proxy=http://192.168.10.12:8008 kubeadm alpha phase certs all --apiserver-advertise-address $K8S_IP
```

  3. Renew the config file used to manage the cluster with kubectl:

```
# \cp -arf /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
chmod 777 $HOME/.kube/config
```

  4. Renew the kubelet certificates:

```
# systemctl stop kubelet
systemctl stop docker
mv /var/lib/kubelet/pki/* /root/kube-backup/kubelet-pki/
systemctl start docker
systemctl start kubelet
```
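To verify the renewal, the new certificate dates can be checked directly; newer kubeadm releases also ship a check-expiration subcommand:

```
# Check the new expiry date of the apiserver certificate
openssl x509 -noout -dates -in /etc/kubernetes/pki/apiserver.crt
# On newer kubeadm versions:
kubeadm alpha certs check-expiration
```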

Google nameservers in Kubernetes


title: “Google nameservers in Kubernetes”
date: 2019-08-07T15:18:41
slug: google-nameservers-in-kubernetes


kubectl edit configmaps -n kube-system coredns

apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
           pods insecure
           upstream
           fallthrough in-addr.arpa ip6.arpa
           ttl 30
        }
        prometheus :9153
        forward . 8.8.8.8 8.8.4.4
        cache 30
        loop
        reload
        loadbalance
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2019-08-06T20:00:32Z"
  name: coredns
  namespace: kube-system
  resourceVersion: "116259"
  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
  uid: d5892fc1-b2fe-4f46-b3bb-4185a34f8f3e
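The reload plugin picks up the change eventually; to force it, the CoreDNS pods can be restarted and resolution tested from inside the cluster. A sketch, assuming the kubeadm default label k8s-app=kube-dns:

kubectl -n kube-system delete pod -l k8s-app=kube-dns
# Test resolution from inside the cluster
kubectl run -it --rm --restart=Never dnstest --image=busybox:1.28 -- nslookup www.google.com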