title: “Assign Pod to a Node”
date: 2020-04-01T16:45:54
slug: assign-pod-to-a-node
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
  nodeName: node01
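The more common alternative is to label the node and select it via nodeSelector instead of hard-coding nodeName; a minimal sketch, assuming a placeholder label disktype=ssd on node01:
kubectl label node node01 disktype=ssd
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
  nodeSelector:
    disktype: ssd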
title: “control”
date: 2020-03-21T13:39:16
slug: control
List topics:
~/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --list
Delete topic:
./kafka-topics.sh --zookeeper localhost:2181 --delete --topic pmacct.acct
Show topic content:
./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic pmacct.acct --from-beginning
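For completeness, a topic can also be created with the same ZooKeeper-based tooling (partition and replication-factor values are placeholders):
./kafka-topics.sh --zookeeper localhost:2181 --create --topic pmacct.acct --partitions 1 --replication-factor 1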
title: “non interactive apt”
date: 2020-03-04T15:43:37
slug: non-interactive-apt
if [ -x /usr/bin/apt ]; then
  apt update -qq
  DEBIAN_FRONTEND=noninteractive apt-get install -y -qq awscli curl gettext-base git jq openssh-client sudo wget > /dev/null
fi
title: “Sample Vault injection”
date: 2020-02-17T09:23:42
slug: sample-vault-injection
# Clean up a previous installation
helm delete vault-helm
kubectl delete pvc data-vault-helm-0
kubectl delete -f pv.yaml
kubectl delete -f app.yaml
kubectl delete -f app_secrets.yaml
rm -rf /STORAGE/vault/*
kubectl config set-context --current --namespace=default
# Install the chart and open a shell in the Vault pod
kubectl create -f pv.yaml
helm install vault-helm vault-helm
kubectl get pods
kubectl logs vault-helm-0
kubectl exec -it vault-helm-0 -- sh
# Inside the Vault pod: initialise, unseal (default threshold is 3 keys) and log in
vault operator init
vault operator unseal
vault operator unseal
vault operator unseal
vault login
vault secrets enable -path="secret" kv
# Policy that lets the app read everything below secret/
cat <<EOF > /home/vault/app-policy.hcl
path "secret*" {
  capabilities = ["read"]
}
EOF
vault policy write app /home/vault/app-policy.hcl
# Enable and configure the Kubernetes auth method
vault auth enable kubernetes
vault write auth/kubernetes/config \
  token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
  kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \
  kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# Bind the "app" service account in the default namespace to the policy above
vault write auth/kubernetes/role/myapp \
  bound_service_account_names=app \
  bound_service_account_namespaces=default \
  policies=app \
  ttl=1h
vault kv put secret/helloworld username=foobaruser password=foobarbazpass
# Deploy the app first without, then with the injection annotations
kubectl create -f app.yaml
kubectl get pods
kubectl delete -f app.yaml
kubectl create -f app_secrets.yaml
kubectl get pods
# Inside the app pod the injected secret is rendered to this file
cat /vault/secrets/helloworld
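app.yaml and app_secrets.yaml are not reproduced here; as a rough sketch (image, labels and object names are assumptions), the injected variant only differs in the Vault agent injector annotations on the pod template:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: app
spec:
  selector:
    matchLabels:
      app: helloworld
  template:
    metadata:
      labels:
        app: helloworld
      annotations:
        vault.hashicorp.com/agent-inject: "true"
        vault.hashicorp.com/role: "myapp"
        vault.hashicorp.com/agent-inject-secret-helloworld: "secret/helloworld"
    spec:
      serviceAccountName: app
      containers:
      - name: app
        image: nginx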
title: “Create self signed Cert without key”
date: 2020-02-16T10:42:16
slug: create-self-signed-cert-without-key
openssl req -newkey rsa:4096 \
-x509 \
-sha256 \
-days 3650 \
-nodes \
-out example.crt \
-keyout example.key \
-subj "/C=SI/ST=Ljubljana/L=Ljubljana/O=Security/OU=IT Department/CN=www.example.com"
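The resulting certificate can be inspected, for example, with:
openssl x509 -in example.crt -noout -subject -dates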
title: “Unseal Vault”
date: 2020-02-15T19:02:02
slug: unseal-vault
https://www.vaultproject.io/intro/getting-started/
$ vault operator init
Unseal Key 1: 4jYbl2CBIv6SpkKj6Hos9iD32k5RfGkLzlosrrq/JgOm
Unseal Key 2: B05G1DRtfYckFV5BbdBvXq0wkK5HFqB9g2jcDmNfTQiS
Unseal Key 3: Arig0N9rN9ezkTRo7qTB7gsIZDaonOcc53EHo83F5chA
Unseal Key 4: 0cZE0C/gEk3YHaKjIWxhyyfs8REhqkRW/CSXTnmTilv+
Unseal Key 5: fYhZOseRgzxmJCmIqUdxEm9C3jB5Q27AowER9w4FC2Ck
Initial Root Token: s.KkNJYWF5g0pomcCLEmDdOVCW
Vault initialized with 5 key shares and a key threshold of 3. Please securely
distribute the key shares printed above. When the Vault is re-sealed,
restarted, or stopped, you must supply at least 3 of these keys to unseal it
before it can start servicing requests.
Vault does not store the generated master key. Without at least 3 keys to
reconstruct the master key, Vault will remain permanently sealed!
It is possible to generate new unseal keys, provided you have a quorum of
existing unseal key shares. See "vault operator rekey" for more information.
vault operator unseal
vault operator unseal
vault operator unseal
export VAULT_ADDR='https://127.0.0.1:8200'
vault login
vault secrets enable -path="vw-rv-cod" kv   # kv = the key/value secrets engine type
vault secrets list
vault kv put vw-rv-cod/k8s/apps/hgw db_pass=asvkjfdsfhjisvh
# All secrets below the path vw-rv-cod/k8s/apps/hgw
vault kv get vw-rv-cod/k8s/apps/hgw
vault kv get -field=db_pass vw-rv-cod/k8s/apps/hgw
# Output in JSON format
vault kv get -format=json vw-rv-cod/k8s/apps/hgw
# Then decode the JSON
vault kv get -format=json vw-rv-cod/k8s/apps/hgw | jq -r .data.data.db_pass
# Two password entries
vault kv put vw-rv-cod/k8s/apps/hgw db_user=asvkjfdsfhj db_pass=asvkjfdsfhjisvh
# Delete the secret
vault kv delete vw-rv-cod/k8s/apps/hgw
Policy
vault policy list
# Create Policy File
cat <<EOF > my-policy.hcl
path "vw-rv-cod/\*" {
capabilities = ["read"]
}
EOF
# Check Syntax of policy file
vault policy fmt my-policy.hcl
# Write Policy
vault policy write my-policy my-policy.hcl
# Show Policy
vault policy read my-policy
# Create a User Token for the policy
vault token create -policy=my-policy
# Login with the created Token
vault login s.FyivwzB9Z4ZYRseD7hwQxNsd
Generate Password
< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c${1:-32};echo;
Read Secret via Curl
curl \
-H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
-X GET \
http://127.0.0.1:8200/v1/secret/foo
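Writing over the HTTP API works the same way; for a kv version 1 mount a sketch looks like this (token and payload are placeholders):
curl \
  -H "X-Vault-Token: f3b09679-3001-009d-2b80-9c306ab81aa6" \
  -X POST \
  -d '{"value": "bar"}' \
  http://127.0.0.1:8200/v1/secret/foo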
title: “Openshift prometheus targets”
date: 2020-02-14T18:02:43
slug: openshift-prometheus-targets
Create a Role and RoleBinding so Prometheus is allowed to scrape the namespace:
apiVersion: authorization.openshift.io/v1
kind: Role
metadata:
  name: prometheus-k8s
  namespace: emqx-openshift
rules:
- apiGroups:
  - ""
  attributeRestrictions: null
  resources:
  - endpoints
  - nodes
  - pods
  - services
  verbs:
  - get
  - list
  - watch
---
apiVersion: authorization.openshift.io/v1
groupNames: null
kind: RoleBinding
metadata:
  name: prometheus-k8s
  namespace: emqx-openshift
roleRef:
  name: prometheus-k8s
  namespace: emqx-openshift
subjects:
- kind: ServiceAccount
  name: prometheus-k8s
  namespace: openshift-monitoring
userNames:
- system:serviceaccount:openshift-monitoring:prometheus-k8s
Create a Service Monitor
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    k8s-app: emqx-metrics
  name: emqx-metrics
  namespace: openshift-monitoring
spec:
  endpoints:
  - interval: 30s
    port: metrics
    scheme: http
    targetPort: 9091
  jobLabel: k8s-app
  namespaceSelector:
    matchNames:
    - emqx-openshift
  selector:
    matchLabels:
      k8s-app: emqx-metrics
Create a metrics Service
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2020-02-14T16:38:26Z"
  labels:
    k8s-app: emqx-metrics
  name: emqx
  namespace: emqx-openshift
  resourceVersion: "810916"
  selfLink: /api/v1/namespaces/emqx-openshift/services/emqx
  uid: 6c740138-4f48-11ea-8568-fa163e8ef4af
spec:
  clusterIP: None
  ports:
  - name: metrics
    port: 9091
    protocol: TCP
    targetPort: 9091
  selector:
    deploymentconfig: emqx-openshift
  sessionAffinity: None
  type: ClusterIP
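Assuming the three manifests are saved as role.yaml, service.yaml and servicemonitor.yaml (file names are placeholders), they can be applied and the ServiceMonitor checked with:
oc apply -f role.yaml -f service.yaml -f servicemonitor.yaml
oc get servicemonitor emqx-metrics -n openshift-monitoring
After a scrape interval or two the new target should show up under Status -> Targets in the Prometheus UI.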
title: “okd hosts file”
date: 2020-02-13T14:03:08
slug: okd-hosts-file
# Create an OSEv3 group that contains the masters, nodes, and etcd groups
[OSEv3:children]
masters
nodes
etcd
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
# SSH user, this user should allow ssh based auth without requiring a password
ansible_ssh_user=root
# If ansible_ssh_user is not root, ansible_become must be set to true
#ansible_become=true
openshift_deployment_type=origin
openshift_disable_check=disk_availability,docker_storage,memory_availability,docker_image_availability
# uncomment the following to enable htpasswd authentication; defaults to AllowAllPasswordIdentityProvider
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
openshift_master_htpasswd_users={'admin': 'admin:$apr1$AysC7Y3Y$LN.UA3Pn1qobSrsxJ7qHN0'}
openshift_master_htpasswd_file='/etc/origin/master/htpasswd'
openshift_docker_options="--signature-verification=false --insecure-registry 172.30.0.0/16 --log-driver=journald --registry-mirror=https://registry.docker-cn.com"
openshift_public_hostname=registry.os.asanger.eu
openshift_master_default_subdomain=apps.os.asanger.eu
openshift_master_api_port=8443
openshift_master_console_port=8443
openshift_hosted_infra_selector=""
openshift_logging_install_logging=false
openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra":"true"}
openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra":"true"}
openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra":"true"}
# host group for masters
[masters]
registry.os.asanger.eu openshift_public_hostname=registry.os.asanger.eu
# host group for etcd
[etcd]
registry.os.asanger.eu openshift_public_ip=0.0.0.0
# host group for nodes, includes region info
[nodes]
registry.os.asanger.eu openshift_node_group_name='node-config-all-in-one'
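With this inventory saved as e.g. hosts.ini (the file name is arbitrary), a 3.11-style all-in-one install is typically driven from an openshift-ansible checkout:
ansible-playbook -i hosts.ini playbooks/prerequisites.yml
ansible-playbook -i hosts.ini playbooks/deploy_cluster.yml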
title: “Enterprise inventory file”
date: 2020-02-13T09:21:08
slug: enterprise-inventory-file
[OSEv3:children]
masters
nodes
etcd
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
oreg_auth_user=tomskiffb
oreg_auth_password=v3NtPkdo7nfijV7SoZhf
openshift_web_console_install=true
osm_use_cockpit=true
openshift_logging_install_logging=true
openshift_disable_check=disk_availability,docker_storage,memory_availability,docker_image_availability
# SSH user, this user should allow ssh based auth without requiring a password
ansible_ssh_user=root
# If ansible_ssh_user is not root, ansible_become must be set to true
#ansible_become=true
openshift_deployment_type=openshift-enterprise
#deployment_subtype=registry
openshift_hosted_infra_selector=""
openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra":"true"}
openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra":"true"}
openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra":"true"}
# uncomment the following to enable htpasswd authentication; defaults to DenyAllPasswordIdentityProvider
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
openshift_master_htpasswd_users={'admin': 'admin:$apr1$AysC7Y3Y$LN.UA3Pn1qobSrsxJ7qHN0'}
openshift_master_htpasswd_file='/etc/origin/master/htpasswd'
openshift_public_hostname=registry.os.asanger.eu
openshift_master_default_subdomain=apps.os.asanger.eu
openshift_master_api_port=8443
openshift_master_console_port=8443
# host group for masters
[masters]
registry.os.asanger.eu
# host group for etcd
[etcd]
registry.os.asanger.eu
# host group for nodes, includes region info
[nodes]
registry.os.asanger.eu openshift_node_group_name='node-config-all-in-one'
title: “mysqlbinlog: show binlog”
date: 2020-02-12T09:02:55
slug: mysqlbinlog-binlog-anzeigen
mysqlbinlog --start-datetime="2020-02-12 06:00:00" --stop-datetime="2020-02-12 10:00:00" --base64-output=decode-rows mysql-bin.000190
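Adding --verbose on top of --base64-output=decode-rows also prints the row events as commented pseudo-SQL, which helps when checking what actually changed:
mysqlbinlog --start-datetime="2020-02-12 06:00:00" --stop-datetime="2020-02-12 10:00:00" --base64-output=decode-rows --verbose mysql-bin.000190 | less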