title: “add yum repo by url”
date: 2020-01-08T11:23:16
slug: add-yum-repo-by-url
yum-config-manager --add-repo=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin
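To confirm the repo was added (yum-config-manager writes a .repo file into /etc/yum.repos.d/), a quick check:
yum repolist | grep -i openshift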
title: “Configure PXE (Network Boot) installation Server on CentOS 7.x”
date: 2019-02-12T09:38:46
slug: configure-pxe-network-boot-installation-server-on-centos-7-x
yum install dhcp tftp tftp-server syslinux vsftpd xinetd
vi /etc/dhcp/dhcpd.conf
# DHCP Server Configuration file.
ddns-update-style interim;
ignore client-updates;
authoritative;
allow booting;
allow bootp;
allow unknown-clients;
# internal subnet for my DHCP Server
subnet 10.0.0.0 netmask 255.255.255.0 {
  range 10.0.0.200 10.0.0.250;
  option domain-name-servers 10.0.0.1;
  option domain-name "openstack.local";
  option routers 10.0.0.1;
  option broadcast-address 10.0.0.255;
  default-lease-time 600;
  max-lease-time 7200;
  next-server 10.0.0.5;
}

host controller.openstack.local {
  hardware ethernet 52:54:00:37:2a:4e;
  option host-name "controller";
  fixed-address 10.0.0.11;
  filename "pxelinux.0";
}
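Before starting the service, it is worth sanity-checking the file; dhcpd has a built-in syntax test (assuming the stock CentOS 7 config path):
dhcpd -t -cf /etc/dhcp/dhcpd.conf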
vi /etc/xinetd.d/tftp
service tftp
{
  socket_type  = dgram
  protocol     = udp
  wait         = yes
  user         = root
  server       = /usr/sbin/in.tftpd
  server_args  = -s /var/lib/tftpboot
  disable      = no
  per_source   = 11
  cps          = 100 2
  flags        = IPv4
}
cp -v /usr/share/syslinux/pxelinux.0 /var/lib/tftpboot
cp -v /usr/share/syslinux/menu.c32 /var/lib/tftpboot
cp -v /usr/share/syslinux/memdisk /var/lib/tftpboot
cp -v /usr/share/syslinux/mboot.c32 /var/lib/tftpboot
cp -v /usr/share/syslinux/chain.c32 /var/lib/tftpboot
mkdir /var/lib/tftpboot/pxelinux.cfg
mkdir /var/lib/tftpboot/networkboot
Mount the CentOS ISO and copy its content to /var/ftp/pub/:
mount /dev/cdrom /mnt
cd /mnt/
cp -av * /var/ftp/pub/
cp /mnt/images/pxeboot/vmlinuz /var/lib/tftpboot/networkboot/
cp /mnt/images/pxeboot/initrd.img /var/lib/tftpboot/networkboot/
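Once xinetd is running (started further below), you can verify that TFTP hands out files; from a test machine with the tftp client package installed:
tftp 10.0.0.5 -c get pxelinux.0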
Generate a root password hash (the output goes into the kickstart rootpw line):
openssl passwd -1 Pxe@123#
$1$e2wrcGGX$tZPQKPsXVhNmbiGg53MN41
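Note that -1 produces an MD5 hash. Since the kickstart below sets passalgo=sha512, you may prefer a SHA-512 hash; a sketch using Python's crypt module (the salt string "saltsalt" is a placeholder, use a random value):
python -c 'import crypt; print(crypt.crypt("Pxe@123#", "$6$saltsalt$"))'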
Create the kickstart file (replace the password hash and URLs with your own):
vi /var/ftp/pub/centos7.cfg
#platform=x86, AMD64, or Intel EM64T
#version=DEVEL
# Firewall configuration
firewall --disabled
# Install OS instead of upgrade
install
# Use FTP installation media
url --url="ftp://10.0.0.5/pub/"
# Root password
rootpw --iscrypted $1$B8PHMhWg$89J2kxGGtxIc/RdA9/3OI1
# System authorization information
auth --useshadow --passalgo=sha512
# Use graphical install
graphical
firstboot --disable
# System keyboard
keyboard de
# System language
lang en_US
# SELinux configuration
selinux --disabled
# Installation logging level
logging --level=info
# System timezone
timezone Europe/Amsterdam
# System bootloader configuration
bootloader --location=mbr
ignoredisk --only-use=sda
clearpart --all --initlabel
part swap --asprimary --fstype="swap" --size=1024 --ondisk=sda
part /boot --fstype xfs --size=300 --ondisk=sda
part pv.01 --size=1 --grow --ondisk=sda
volgroup root_vg01 pv.01
logvol / --fstype xfs --name=lv_01 --vgname=root_vg01 --size=1 --grow
%packages
@^minimal
@core
%end
network --device=eth0 --bootproto=dhcp
network --device=eth1 --bootproto=static
%addon com_redhat_kdump --disable --reserve-mb='auto'
%end
%post
rpm -Uvh https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
yum -y install salt-minion
yum -y update
systemctl enable salt-minion
systemctl start salt-minion
echo "10.0.0.5 salt" >> /etc/hosts
sed -i -e 's/GRUB_CMDLINE_LINUX=.*/GRUB_CMDLINE_LINUX="rd.lvm.lv=root_vg01\/lv_01 rhgb quiet net.ifnames=0 biosdevname=0"/' /etc/default/grub
grub2-mkconfig -o /boot/grub2/grub.cfg
echo "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin" >> /var/spool/cron/root
echo "@reboot salt-call state.apply > /tmp/out 2>&1" >> /var/spool/cron/root
%end
reboot
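If the pykickstart package is installed, you can validate the kickstart file before the first test install:
ksvalidator /var/ftp/pub/centos7.cfg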
Create a PXE menu file
vi /var/lib/tftpboot/pxelinux.cfg/default
default menu.c32
prompt 0
timeout 30
MENU TITLE LinuxTechi.com PXE Menu
LABEL centos7_x64
MENU LABEL CentOS 7_X64
KERNEL /networkboot/vmlinuz
APPEND initrd=/networkboot/initrd.img inst.repo=ftp://10.0.0.5/pub ks=ftp://10.0.0.5/pub/centos7.cfg net.ifnames=0 biosdevname=0
Start Services
systemctl start xinetd
systemctl enable xinetd
systemctl start dhcpd.service
systemctl enable dhcpd.service
systemctl start vsftpd
systemctl enable vsftpd
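A quick way to verify all three daemons are listening (the ports are the defaults: TFTP 69/udp via xinetd, DHCP 67/udp, FTP 21/tcp):
ss -lun | grep -E ':69|:67'
ss -ltn | grep :21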
Allow FTP connections (SELinux):
setsebool -P allow_ftpd_full_access 1
Allow connections (firewall):
firewall-cmd --add-service=ftp --permanent
firewall-cmd --add-service=dhcp --permanent
firewall-cmd --add-port=69/tcp --permanent
firewall-cmd --add-port=69/udp --permanent
firewall-cmd --add-port=4011/udp --permanent
firewall-cmd --add-port=4506/tcp --permanent
firewall-cmd --reload
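Verify the rules took effect:
firewall-cmd --list-all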
Install Salt
rpm -Uvh https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
yum -y install salt-master epel-release git
yum install python-pip
pip install --upgrade pip
export VERSION=develop
for s in $(python -c "import site; print(' '.join(site.getsitepackages()))"); do
pip install --install-option="--prefix=" --upgrade --force-reinstall -I \
-t "$s" git+https://github.com/salt-formulas/reclass.git@${VERSION};
done
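A quick import check to confirm reclass landed in site-packages:
python -c "import reclass"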
mkdir -p /srv/salt/inventory/{classes,hosts}
mkdir -p /srv/salt/pillar/{file_tree,roots/base}
mkdir -p /srv/salt/state/base/basenode
[root@localhost ~]# tree /srv/salt/
/srv/salt/
├── inventory
│ ├── classes
│ └── hosts
│ └── controller.openstack.yml
├── pillar
│ ├── file_tree
│ └── roots
│ └── base
└── state
└── base
└── basenode
└── init.sls
10 directories, 2 files
vi /srv/salt/state/base/basenode/init.sls
mc:
  pkg.installed
vi /srv/salt/inventory/hosts/controller.openstack.yml
environment: base
classes: {}
applications:
  - basenode
vi /etc/salt/master.d/01-master.conf
keep_jobs: 24
max_open_files: 16384
open_mode: True
auto_accept: True
state_top: top.sls

reclass: &reclass
  storage_type: yaml_fs
  inventory_base_uri: /srv/salt/inventory
  nodes_uri: hosts
  classes_uri: classes
  class_mappings: []

master_tops:
  reclass: *reclass

state_output: changes

file_roots:
  base:
    - /srv/salt/state/base

env_order:
  - base

hash_type: sha256

file_ignore_regex:
  - '/\.svn($|/)'
  - '/\.git($|/)'

file_ignore_glob:
  - '*.pyc'
  - '*/somefolder/*.bak'
  - '*.swp'

fileserver_backend:
  - roots

pillar_roots:
  base:
    - /srv/salt/pillar/roots/base

ext_pillar:
  - reclass:
      storage_type: yaml_fs
      inventory_base_uri: /srv/salt/inventory
      nodes_uri: hosts
      classes_uri: classes
      class_mappings: []
  - file_tree:
      root_dir: /srv/salt/pillar/file_tree
      follow_dir_links: False
      keep_newline: True

pillar_source_merging_strategy: smart
log_level: warning
systemctl enable salt-master
systemctl start salt-master
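Once a minion (e.g. the PXE-installed controller) has connected, a quick smoke test from the master:
salt-key -L
salt '*' test.ping
salt '*' state.apply test=True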
NFS share for the Salt scripts
yum -y install nfs-utils
#firewall-cmd --permanent --zone=public --add-service=ssh
firewall-cmd --permanent --zone=public --add-service=nfs
firewall-cmd --reload
systemctl enable nfs-server.service
systemctl start nfs-server.service
vi /etc/exports
/srv/salt/ 10.0.0.0/24(rw,sync,no_subtree_check)
exportfs -a
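Verify the export is active:
exportfs -v
showmount -e 10.0.0.5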
On the client:
sudo mount -o soft -t nfs 10.0.0.5:/srv/salt/ /home/tay/openstack/salt
Install Vault
yum install unzip
curl https://releases.hashicorp.com/vault/1.0.3/vault_1.0.3_linux_amd64.zip -o vault_1.0.3_linux_amd64.zip
unzip vault_1.0.3_linux_amd64.zip
mv vault /usr/bin/
export VAULT_ADDR='http://10.0.0.5:8200'
vi /root/vault.hcl
backend "file" {
path = "/var/lib/vault"
}
listener "tcp" {
address = "0.0.0.0:8200"
tls\_disable = 1
}
vault server -config=/root/vault.hcl &
vault operator init
Unseal Key 1: hiR5fSIEaS5NErWwmH/KGpwo1UDROL4nW5SyCwk6kMNm
Unseal Key 2: iYJrQ3UCIuuqwonQJWu4JoLj+/ElQfrKxtnEfd/H83rC
Unseal Key 3: 6+sPo592bAA9n5VCz9agyIW7Xrsb5dBsP7YEpK9gdP6c
Unseal Key 4: fxKfoRb6IWL5+07X25AmoHGbjwypX7592VydbBl3jpPU
Unseal Key 5: ItaavrSMOR8Qn3Q2wd7BDEVd76k2NDczstZnqWyWndKw
Initial Root Token: s.5qhccbKwytnpX5sEjLDBp3hg
Run the unseal command three times, supplying a different unseal key each time, until Unseal Progress reaches 3/3:
vault operator unseal
vault login <Initial Root Token>
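Note: a non-dev Vault server has no KV engine mounted at secret/ by default; if the writes below fail with "no handler for route", mount one first (version 1 keeps the flat secret/... paths used here):
vault secrets enable -version=1 -path=secret kv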
vault write secret/openstack/RABBIT_PASS password="abc123"
Create a Salt token (read and list):
vi salt-policy.hcl
path "openstack/\*" {
capabilities = ["read", "list"]
}
path "auth/\*" {
capabilities = ["read", "list","sudo","create","update","delete"]
}
vault policy write salt-policy salt-policy.hcl
vault token create -policy=salt-policy
Key                  Value
---                  -----
token                s.PCGMAcQTrN505EA8BIB3dLku
token_accessor       avIpcDUGvSTJMUmb910eXTUG
token_duration       768h
token_renewable      true
token_policies       ["default" "salt-policy"]
identity_policies    []
policies             ["default" "salt-policy"]
Create Entries
for NAME in ADMIN_PASS CINDER_DBPASS CINDER_PASS DASH_DBPASS DEMO_PASS GLANCE_DBPASS GLANCE_PASS KEYSTONE_DBPASS METADATA_SECRET NEUTRON_DBPASS NEUTRON_PASS NOVA_DBPASS NOVA_PASS PLACEMENT_PASS PLACEMENT_DBPASS RABBIT_PASS
do
PW=`openssl rand -hex 10`
vault write secret/openstack/${NAME} password=${PW}
done
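List the entries to confirm everything was written:
vault list secret/openstack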
vi /etc/salt/master.d/vault.conf (with the token from the previous step – vault token create -policy=salt-policy)
vault:
  url: http://10.0.0.5:8200
  auth:
    method: token
    token: s.mIeRDNYMeGhJv2W5e96DGr7z
  policies:
    - salt-policy
Note that the policy paths must include the secret/ mount prefix, so update salt-policy.hcl accordingly (and load it again with vault policy write salt-policy salt-policy.hcl):
vi salt-policy.hcl
path "secret/*" {
  capabilities = ["read", "list"]
}

path "auth/*" {
  capabilities = ["read", "list", "sudo", "create", "update", "delete"]
}
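With the master config and policy in place, secrets can be pulled through the master; a minimal Jinja sketch for a state or pillar file (the key name password matches the entries written above, and this assumes Salt's vault execution module):
# e.g. in a pillar .sls template:
rabbitmq_password: {{ salt['vault'].read_secret('secret/openstack/RABBIT_PASS')['password'] }}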
title: “mod_jk”
date: 2016-02-24T19:20:43
slug: mod_jk
CentOS again. Today I’m going to show you how to install and configure mod_jk for Apache httpd on a CentOS server. This tutorial was tested on CentOS 6.x and 7.x, so you shouldn’t run into trouble if you stick with one of those versions.
So, what’s mod_jk? mod_jk is an Apache httpd module used to make Apache Tomcat applications interact with an httpd server. In simple words, mod_jk allows us to connect an instance of Tomcat with the Apache httpd web server. This is useful, for example, if your httpd serves different kinds of webapps (PHP, RoR, etc.) and you want to publish a Java app running on a Tomcat instance. In this case, httpd runs on port 80 and Tomcat (usually) on port 8080, so we somehow need to connect the Tomcat instance with httpd so our users can interact with our Java app directly on port 80. The httpd server then gives us access to, for example, an internal network where your Tomcat instances live.
Why mod_jk and not mod_proxy? This is indeed a good question. From a Stack Overflow question/answer:
mod_proxy:
Pros:
No need for a separate module compilation and maintenance. mod_proxy, mod_proxy_http, mod_proxy_ajp and mod_proxy_balancer come as part of the standard Apache 2.2+ distribution.
Ability to use http/https or AJP protocols, even with the same balancer.
Cons:
mod_proxy_ajp does not support large 8K+ packet sizes.
Basic load balancer.
Does not support Domain model clustering.
mod_jk:
Pros:
Advanced load balancer.
Advanced node failure detection.
Support for large AJP packet sizes.
Cons:
Need to build and maintain a separate module.
So, the discussion is open and there is no final answer. A good article covering this topic is “Deciding between mod_jk, mod_proxy_http and mod_proxy_ajp” from Tomcat Experts.
The installation process for mod_jk is really simple, but we’re going to need to compile the module first. Before doing any compile work, ensure you have both httpd and tomcat installed. Now:
yum install httpd-devel apr apr-devel apr-util apr-util-devel gcc gcc-c++ make autoconf libtool
Now, go to the official mod_jk website and download the latest version: http://tomcat.apache.org/download-connectors.cgi (1.2.41 at the time this post was published):
mkdir -p /opt/mod_jk/
cd /opt/mod_jk
wget http://www.eu.apache.org/dist/tomcat/tomcat-connectors/jk/tomcat-connectors-1.2.41-src.tar.gz
tar -xvzf tomcat-connectors-1.2.41-src.tar.gz
cd tomcat-connectors-1.2.41-src/native
In the native folder (see the last step in the code above) we’re going to run the usual configure / make / make install for the connector:
./configure --with-apxs=/usr/sbin/apxs
make
libtool --finish /usr/lib64/httpd/modules
make install
If all goes well, you’re going to have mod_jk.so installed in your /etc/httpd/modules folder.
First, let’s enable the AJP connector on your Tomcat server, in its server.xml configuration file:
vim $TOMCAT_HOME/conf/server.xml
Find the following comment:
<!-- Define an AJP 1.3 Connector on port 8009 -->
Under it, add (or uncomment) the AJP connector definition, for example:
<Connector port="8009" protocol="AJP/1.3" redirectPort="8443" />
And modify the Engine tag so it looks like:
<Engine name="Catalina" defaultHost="localhost" jvmRoute="app1">
Observation 1: for each Tomcat instance linked to your httpd server, you need to define a different jvmRoute parameter. For example, for a second instance you could use:
<Engine name="Catalina" defaultHost="localhost" jvmRoute="app2">
Now, let’s go on with the httpd configuration. First, create a mod_jk.conf file in your conf.d folder:
vim /etc/httpd/conf.d/mod_jk.conf
And populate the file with the following:
LoadModule jk_module "/etc/httpd/modules/mod_jk.so"
JkWorkersFile /etc/httpd/conf/workers.properties
# Where to put jk shared memory
JkShmFile /var/run/mod_jk/mod_jk.shm
# Where to put jk logs
JkLogFile /var/log/httpd/mod_jk.log
# Set the jk log level [debug/error/info]
JkLogLevel info
# Select the timestamp log format
JkLogStampFormat "[%a %b %d %H:%M:%S %Y] "
#JkRequestLogFormat "%w %V %T"
#JkEnvVar SSL_CLIENT_V_START worker1
Before continuing, create the folder to store the shared memory of the module:
mkdir -p /var/run/mod_jk
chown apache:apache /var/run/mod_jk
Now, create the workers.properties file (see the JkWorkersFile property in the mod_jk.conf file):
vim /etc/httpd/conf/workers.properties
With the following content:
workers.apache_log=/var/log/httpd
worker.list=app1Worker
worker.stat1.type=status
worker.app1Worker.type=ajp13
worker.app1Worker.host=app1.myhost.com #put your app host here
worker.app1Worker.port=8009
For every app served from Tomcat through httpd you’re going to have a specific worker. Don’t forget to define the worker first in the worker.list property. For example, let’s assume we’re going to add another app from Tomcat:
workers.apache_log=/var/log/httpd
worker.list=app1Worker,app2Worker
worker.stat1.type=status
worker.app1Worker.type=ajp13
worker.app1Worker.host=app1.myhost.com #put your app host here
worker.app1Worker.port=8009
worker.app2Worker.type=ajp13
worker.app2Worker.host=app2.myhost.com #put your app host here
worker.app2Worker.port=8009
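For reference, the “advanced load balancer” mentioned in the pros above is also configured in workers.properties; a minimal sketch, using the worker names from this example:
worker.list=lbWorker
worker.lbWorker.type=lb
worker.lbWorker.balance_workers=app1Worker,app2Worker
With such a setup, JkMount would point at lbWorker instead of an individual worker.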
Well, everything looks good now. The final step is to configure the VirtualHost for every app on httpd:
vim /etc/httpd/conf.d/app1.conf
It’s good practice to maintain your VirtualHosts in separate files. Now, in your recently created app1.conf file:
<VirtualHost *:80>
    ServerName app1.myhost.com
    ServerAdmin admin@myhost.com
    LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" combined
    CustomLog /var/log/httpd/app1_access.log combined
    ErrorLog /var/log/httpd/app1_error.log
    JkMount /* app1Worker
</VirtualHost>
We are connecting httpd with Tomcat using the JkMount directive in the VirtualHost configuration. If, for example, you’re adding a VirtualHost for your second app, use the app2Worker configured previously, and so on for other apps.
If you followed all the previous steps, you should be able to interact with your tomcat app directly from http://app1.myhost.com which is handled by httpd. Beautiful!
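To wrap up, check the httpd configuration and restart the service (systemctl on CentOS 7, service httpd restart on CentOS 6), then request the app through httpd:
apachectl configtest
systemctl restart httpd
curl -I http://app1.myhost.com/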
In this tutorial, we learned how to use mod_jk to connect different Tomcat instances with the httpd web server. The procedure is straightforward but involves some compile tasks and a few configuration files on each server. If you have any doubts, don’t hesitate to start a conversation in the comments section.
title: “yum groupinstall “Development Tools””
date: 2016-02-24T19:00:49
slug: yum-groupinstall-development-tools
yum groupinstall "Development Tools"
title: “open a port in firewall”
date: 2016-02-12T15:23:47
slug: open-a-port-in-firewall
firewall-cmd --zone=public --add-port=5666/tcp --permanent
firewall-cmd --reload
title: “Allow httpd to allow external connection (reverse proxy)”
date: 2016-02-12T14:27:58
slug: allow-httpd-to-allow-external-connection-reverse-proxy
/usr/sbin/setsebool httpd_can_network_connect 1
Permanent (-P):
/usr/sbin/setsebool -P httpd_can_network_connect 1
title: “Creating gfs cluster”
date: 2016-01-28T13:37:46
slug: creating-gfs-cluster
On each node:
yum -y install lvm2-cluster pacemaker pcs gfs2-utils
Start and enable the daemon by issuing the following commands on each node.
systemctl start pcsd.service
systemctl enable pcsd.service
As root, set a password for hacluster:
passwd hacluster
password:
On the primary node:
pcs cluster auth RHEL centos
Username: hacluster
Password:
RHEL: Authorized
centos: Authorized
pcs cluster setup --name mycluster RHEL centos
Shutting down pacemaker/corosync services...
Redirecting to /bin/systemctl stop pacemaker.service
Redirecting to /bin/systemctl stop corosync.service
Killing any remaining services...
Removing all cluster configuration files...
RHEL: Succeeded
centos: Succeeded
Synchronizing pcsd certificates on nodes RHEL, centos...
RHEL: Success
centos: Success
Restarting pcsd on the nodes in order to reload the certificates...
RHEL: Success
centos: Success
That’s it. Corosync is configured across the cluster.
Start the cluster
pcs cluster start
Start Cluster on all nodes
pcs cluster start --all
Check Status
pcs cluster status
Verify Corosync Installation
corosync-cfgtool -s
mkfs -t gfs2 -p lock_dlm -j 2 -t one:test /dev/<drbd-resource>
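Note that mkfs passes the second -t through to mkfs.gfs2, where it sets the lock table name in clustername:fsname form; the cluster name part must match the --name given to pcs cluster setup. Equivalent direct call (mycluster from the setup above; the device /dev/drbd0 and filesystem name gfs01 are just examples):
mkfs.gfs2 -p lock_dlm -j 2 -t mycluster:gfs01 /dev/drbd0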
lvmconf --enable-cluster
systemctl reboot
title: “Bootsector”
date: 2016-01-18T14:32:29
slug: bootsector
The MBR size in bytes is as follows, where 446 + 64 + 2 = 512:
446 bytes - bootstrap code.
64 bytes - partition table.
2 bytes - signature.
Here /dev/sda as an example:
hexdump -s0 -n512 -C /dev/sda
hexdump -s0 -n512 -C /dev/sda1
Wipe the full MBR (bootstrap code plus partition table):
dd if=/dev/zero of=/dev/sda bs=512 count=1
Wipe only the bootstrap code, keeping the partition table:
dd if=/dev/zero of=/dev/sda bs=446 count=1
dd, the good old command, now backs up partition tables and even writes CDs ;). Backing up the partition table is really just backing up the MBR (master boot record). The command is as follows (replace /dev/sda with your actual device name):
dd if=/dev/sda of=/tmp/sda-mbr.bin bs=512 count=1
Now, to restore only the 64-byte partition table to the disk, all you need to do is use dd again:
dd if=/tmp/sda-mbr.bin of=/dev/sda bs=1 count=64 skip=446 seek=446
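To restore the complete MBR (bootstrap code included) from the same backup:
dd if=/tmp/sda-mbr.bin of=/dev/sda bs=512 count=1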
title: “How to mount LVM partitions from rescue mode RHEL”
date: 2016-01-18T13:44:42
slug: how-to-mount-lvm-partitions-from-rescue-mode-rhel
Scan for volume groups:
lvm vgscan -v
Activate all volume groups:
lvm vgchange -a y
List logical volumes:
lvm lvs --all
With this information, and the volumes activated, you should be able to mount the volumes:
mount /dev/volumegroup/logicalvolume /mountpoint
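The logical volumes also show up under /dev/mapper; on a stock CentOS install (the VG name centos is an assumption) that would be, for example:
mount /dev/mapper/centos-root /mnt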
title: “Resetting the Root Password”
date: 2016-01-18T12:42:55
slug: resetting-the-root-password
1. Boot your system and wait until the GRUB2 menu appears.
2. In the boot loader menu, highlight any entry and press e to edit it.
3. Find the line beginning with linux. At the end of this line, append the following:
init=/bin/sh
IMPORTANT: Some systems (notably virtual machines) may have problems displaying correct output when you boot using this procedure. Some characters or even entire lines may be hidden, making the shell difficult to use. To solve this problem, delete the rhgb option from the linux line.
4. Press F10 or Ctrl+X to boot the system using the options you just edited. Once the system boots, you will be presented with a shell prompt without having to enter any user name or password:
sh-4.2#
5. Load the installed SELinux policy (-i means initial policy load; only use this if this is the first time the policy is being loaded since boot, which is usually done from the initramfs):
sh-4.2# /usr/sbin/load_policy -i
6. Execute the following command to remount your root partition read-write:
sh-4.2# mount -o remount,rw /
7. Reset the root password:
sh-4.2# passwd root
When prompted, enter your new root password and confirm by pressing Enter. Enter the password a second time to make sure you typed it correctly and confirm with Enter again. If both passwords match, a message informing you of a successful root password change will appear.
8. Remount the root partition again, this time read-only:
sh-4.2# mount -o remount,ro /
9. Reboot the system. From now on, you will be able to log in as the root user using the new password set up during this procedure.
Another way: add the following parameters to the kernel boot line in GRUB:
rd.break enforcing=0
Then, from the emergency shell:
mount -o remount,rw /sysroot
chroot /sysroot
passwd
touch /.autorelabel
mount -o remount,ro /
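Side note: since the system was booted with enforcing=0, relabeling only the file that passwd touched also works, instead of the full relabel triggered by /.autorelabel:
restorecon /etc/shadow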