# Change the root password on every host listed in systems_list
for i in $(cat systems_list)
do
sshpass -p 'oldpassword@2016' ssh -q root@$i "echo 'NEwpassword@2017' | passwd root --stdin"
done
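Here, systems_list is assumed to be a plain text file with one hostname or IP address per line, for example (placeholder entries):
server01.example.com
server02.example.com
192.168.10.50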
# chown root:root /
#Subsystem sftp /usr/libexec/openssh/sftp-server
Subsystem sftp internal-sftp
These changes are made in the /etc/ssh/sshd_config file. Append the following Match block at the end of that file:
Match Group jailUsers
ChrootDirectory /chroots/%u
AllowTcpForwarding no
ForceCommand internal-sftp
X11Forwarding no
# groupadd jailUsers
For an existing account:
# usermod -g jailUsers -s /bin/false user
Or, to create a new account instead:
# useradd -d /myhome -M -g jailUsers -s /bin/false user
# passwd user
# mkdir -p /chroots/user ; chmod -R 755 /chroots/user
# mkdir /chroots/user/myhome ; chown user:jailUsers /chroots/user/myhome
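Restart sshd to apply the configuration, then verify the jail (the restart step and the hostname sftp.example.com are assumptions, not shown above): the account should be able to connect with sftp, but an interactive ssh login should be refused because its shell is /bin/false.
# systemctl restart sshd
# sftp user@sftp.example.com
# ssh user@sftp.example.com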
Instance HA requires the following packages:
fence-agents-all-4.0.11-27.el7_2.5.x86_64 (or greater)
pacemaker-1.1.13-10.el7_2.2.x86_64 (or greater)
resource-agents-3.9.5-54.el7_2.6.x86_64 (or greater)
This configuration normally requires shared storage. If you attempt to use the no-shared-storage option without it, you are likely to receive an InvalidSharedStorage error during evacuation, and instances will not power up on the other node. However, if all your instances are configured to boot up from a Block Storage (cinder) volume, you will not need shared storage for storing the disk image of instances; you will be able to evacuate all instances using the no-shared-storage option.
During evacuation, if your instances are configured to boot from a Block Storage (cinder) volume, any evacuated instances can be expected to boot up from the same cinder volume, but on another Compute node. As a result, the evacuated instances are able to immediately restart their jobs, as the OS image and application data are kept on the cinder volume. If this applies to your deployment, use the no_shared_storage=1 option in step 7.
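As a sketch of that kind of configuration (the image and volume IDs are placeholders; verify the exact syntax with nova help boot and cinder help create for your client versions), an instance can be booted from a cinder volume instead of an ephemeral disk:
# Create a 10 GB bootable volume from a Glance image, then boot an instance whose
# root disk is that cinder volume, so no shared storage is needed for evacuation.
cinder create --image-id <image-uuid> --display-name boot-vol 10
nova boot --flavor 2 --block-device-mapping vda=<boot-vol-uuid>:::0 test-from-volume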
On each Compute node, stop and disable the OpenStack services and libvirtd:
sudo openstack-service stop
sudo openstack-service disable
sudo systemctl stop libvirtd
sudo systemctl disable libvirtd
sudo mkdir -p /etc/pacemaker/
sudo dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1
sudo cp /etc/pacemaker/authkey ./
sudo chown heat-admin:heat-admin authkey
scp heat-admin@compute-1:~/authkey ./
scp authkey heat-admin@node-n:~/
sudo mkdir -p --mode=0750 /etc/pacemaker/
sudo chgrp haclient /etc/pacemaker
sudo mv authkey /etc/pacemaker/
sudo chown root:haclient /etc/pacemaker/authkey
sudo systemctl enable pacemaker_remote
sudo systemctl start pacemaker_remote
Confirm that the required pacemaker (pacemaker-1.1.13-10.el7_2.2.x86_64), fence-agents (fence-agents-all-4.0.11-27.el7_2.5.x86_64) and resource-agents (resource-agents-3.9.5-54.el7_2.6.x86_64) packages are installed on the Controller and Compute nodes:
sudo rpm -qa | egrep '(pacemaker|fence-agents|resource-agents)'
sudo pcs constraint order start openstack-nova-novncproxy-clone then openstack-nova-api-clone
sudo pcs constraint order start rabbitmq-clone then openstack-keystone-clone
sudo pcs constraint order promote galera-master then openstack-keystone-clone
sudo pcs constraint order start haproxy-clone then openstack-keystone-clone
sudo pcs constraint order start memcached-clone then openstack-keystone-clone
sudo pcs constraint order promote redis-master then start openstack-ceilometer-central-clone require-all=false
sudo pcs resource defaults resource-stickiness=INFINITY
sudo pcs config | grep systemd | awk '{print $2}' | while read RESOURCE; do sudo pcs resource update $RESOURCE op start timeout=200s op stop timeout=200s; done
The overcloudrc file provides the auth_url, username, tenant and password values. Copy it to a controller, source it, and create the nova-evacuate resource:
scp overcloudrc heat-admin@controller-1:~/
. ~/overcloudrc
sudo pcs resource create nova-evacuate ocf:openstack:NovaEvacuate auth_url=$OS_AUTH_URL username=$OS_USERNAME \
password=$OS_PASSWORD tenant_name=$OS_TENANT_NAME
If you are not using shared storage, include the no_shared_storage=1 option in the resource create command above. See Exception for shared storage for more information.
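If that exception applies to you, the resource would instead be created with the extra no_shared_storage=1 parameter, along these lines (a sketch; same environment variables as above):
sudo pcs resource create nova-evacuate ocf:openstack:NovaEvacuate auth_url=$OS_AUTH_URL username=$OS_USERNAME \
password=$OS_PASSWORD tenant_name=$OS_TENANT_NAME no_shared_storage=1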
Ensure that nova-evacuate starts only after the IP resources and the core OpenStack service clones:
for i in $(sudo pcs status | grep IP | awk '{ print $1 }'); do sudo pcs constraint order start $i then nova-evacuate ; done
for i in openstack-glance-api-clone neutron-metadata-agent-clone openstack-nova-conductor-clone; do \
sudo pcs constraint order start $i then nova-evacuate require-all=false ; done
sudo pcs resource disable openstack-keystone --wait=540s
The timeout of 540s used here is only an example. If you experience issues, you can calculate a timeout period suitable for your environment, for example by adding up the stop timeouts of the individual resources; a sketch for doing this follows the pcs output below. In the worked example, the resources each use a 120s stop timeout and the calculation arrives at a total of 600s, which can be considered a suitable value to begin testing with. You can validate your timeout calculations using pcs resource:
pcs resource show openstack-ceilometer-central
Resource: openstack-ceilometer-central (class=systemd type=openstack-ceilometer-central)
 Operations: start interval=0s timeout=120s (openstack-ceilometer-central-start-interval-0s)
             monitor interval=60s (openstack-ceilometer-central-monitor-interval-60s)
             stop interval=0s timeout=120s (openstack-ceilometer-central-stop-interval-0s)
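One rough way to arrive at such a total, assuming your resources report their operations in the format shown above, is to sum the stop timeouts across all resources (a sketch, not part of the original procedure):
# Sum every 'stop' operation timeout (in seconds) reported by pcs; the result is a
# starting point for the --wait value used when disabling openstack-keystone.
sudo pcs resource show --full | grep 'stop interval' | grep -o 'timeout=[0-9]*' \
  | awk -F= '{ total += $2 } END { print total "s" }'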
Create a list of the current controllers using cibadmin data:
controllers=$(sudo cibadmin -Q -o nodes | grep uname | sed s/.*uname..// | awk -F\" '{print $1}')
echo $controllers
Use this list to tag these nodes as controllers with the osprole=controller property:
for controller in ${controllers}; do sudo pcs property set --node ${controller} osprole=controller ; done
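To confirm the tagging, list the cluster properties; the osprole values should appear as node attributes (exact output layout varies by pcs version):
sudo pcs property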
Build a list of the stonith devices that are already defined, then give every other resource a location constraint so that it runs only on nodes tagged osprole=controller:
stonithdevs=$(sudo pcs stonith | awk '{print $1}')
echo $stonithdevs
for i in $(sudo cibadmin -Q --xpath //primitive --node-path | tr ' ' '\n' | awk -F "id='" '{print $2}' | awk -F "'" '{print $1}' | uniq); do \
found=0
if [ -n "$stonithdevs" ]; then
for x in $stonithdevs; do
if [ $x = $i ]; then
found=1
fi
done
fi
if [ $found = 0 ]; then
sudo pcs constraint location $i rule resource-discovery=exclusive score=0 osprole eq controller
fi
done
sudo pcs resource create neutron-openvswitch-agent-compute \
systemd:neutron-openvswitch-agent --clone interleave=true --disabled --force
sudo pcs constraint location neutron-openvswitch-agent-compute-clone \
rule resource-discovery=exclusive score=0 osprole eq compute
sudo pcs constraint order start neutron-server-clone then \
neutron-openvswitch-agent-compute-clone require-all=false
sudo pcs resource create libvirtd-compute systemd:libvirtd --clone interleave=true --disabled --force
sudo pcs constraint location libvirtd-compute-clone rule resource-discovery=exclusive score=0 osprole eq compute
sudo pcs constraint order start neutron-openvswitch-agent-compute-clone then libvirtd-compute-clone
sudo pcs constraint colocation add libvirtd-compute-clone with neutron-openvswitch-agent-compute-clone
sudo pcs resource create ceilometer-compute systemd:openstack-ceilometer-compute --clone interleave=true --disabled --force
sudo pcs constraint location ceilometer-compute-clone rule resource-discovery=exclusive score=0 osprole eq compute
sudo pcs constraint order start openstack-ceilometer-notification-clone then ceilometer-compute-clone require-all=false
sudo pcs constraint order start libvirtd-compute-clone then ceilometer-compute-clone
sudo pcs constraint colocation add ceilometer-compute-clone with libvirtd-compute-clone
. /home/heat-admin/overcloudrc
sudo pcs resource create nova-compute-checkevacuate ocf:openstack:nova-compute-wait auth_url=$OS_AUTH_URL username=$OS_USERNAME password=$OS_PASSWORD tenant_name=$OS_TENANT_NAME domain=localdomain op start timeout=300 --clone interleave=true --disabled --force
sudo pcs constraint location nova-compute-checkevacuate-clone rule resource-discovery=exclusive score=0 osprole eq compute
sudo pcs constraint order start openstack-nova-conductor-clone then nova-compute-checkevacuate-clone require-all=false
sudo pcs resource create nova-compute systemd:openstack-nova-compute --clone interleave=true --disabled --force
sudo pcs constraint location nova-compute-clone rule resource-discovery=exclusive score=0 osprole eq compute
sudo pcs constraint order start nova-compute-checkevacuate-clone then nova-compute-clone require-all=true
sudo pcs constraint order start nova-compute-clone then nova-evacuate require-all=false
sudo pcs constraint order start libvirtd-compute-clone then nova-compute-clone
sudo pcs constraint colocation add nova-compute-clone with libvirtd-compute-clone
Add stonith devices for the Compute nodes, replacing the $IPMILAN_USERNAME and $IPMILAN_PASSWORD values to suit your IPMI device:
sudo pcs stonith create ipmilan-overcloud-compute-0 fence_ipmilan \
pcmk_host_list=overcloud-compute-0 ipaddr=10.35.160.78 login=$IPMILAN_USERNAME passwd=$IPMILAN_PASSWORD lanplus=1 cipher=1 op monitor interval=60s
Source overcloudrc again and create a separate fence-nova stonith device:
. overcloudrc
sudo pcs stonith create fence-nova fence_compute \
auth-url=$OS_AUTH_URL \
login=$OS_USERNAME \
passwd=$OS_PASSWORD \
tenant-name=$OS_TENANT_NAME \
domain=localdomain \
record-only=1 --force
sudo pcs property set cluster-recheck-interval=1min
sudo pcs resource create overcloud-compute-n ocf:pacemaker:remote reconnect_interval=60 op monitor interval=20
sudo pcs property set --node overcloud-compute-n osprole=compute
sudo pcs stonith level add 1 overcloud-compute-0 ipmilan-overcloud-compute-0,fence-nova
sudo pcs stonith
sudo pcs resource enable openstack-keystone
sudo pcs resource enable neutron-openvswitch-agent-compute
sudo pcs resource enable libvirtd-compute
sudo pcs resource enable ceilometer-compute
sudo pcs resource enable nova-compute-checkevacuate
sudo pcs resource enable nova-compute
sleep 60
sudo pcs resource cleanup
sudo pcs status
sudo pcs property set stonith-enabled=true
. overcloudrc
nova boot --image cirros --flavor 2 test-failover
nova list --fields name,status,host
. stackrc
ssh -lheat-admin compute-n
sudo su -
echo c > /proc/sysrq-trigger
nova list --fields name,status,host
nova service-list