=== LVM ===
1) To create a PV (physical volume)
pvs
[root@rhel1 ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda2 rhel lvm2 a-- <9.00g 0
/dev/sdb ora-vg lvm2 a-- 508.00m 508.00m
/dev/sdc ora-vg lvm2 a-- 508.00m 508.00m
/dev/sdd lvm2 --- 512.00m 512.00m
[root@rhel1 ~]# pvcreate /dev/sde
Physical volume "/dev/sde" successfully created.
[root@rhel1 ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sda2 rhel lvm2 a-- <9.00g 0
/dev/sdb ora-vg lvm2 a-- 508.00m 508.00m
/dev/sdc ora-vg lvm2 a-- 508.00m 508.00m
/dev/sdd lvm2 --- 512.00m 512.00m
/dev/sde lvm2 --- 512.00m 512.00m
=> To create a VG (volume group)
[root@rhel1 ~]# vgcreate ora-vg /dev/sdb /dev/sdc
Volume group "ora-vg" successfully created
[root@rhel1 ~]#
[root@rhel1 ~]#
[root@rhel1 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
ora-vg 2 0 0 wz--n- 1016.00m 1016.00m
rhel 1 2 0 wz--n- <9.00g 0
[root@rhel1 ~]#
=> To create an LV (logical volume)
[root@rhel1 ~]# lvcreate -L +1000m -n data1 ora-vg
Logical volume "data1" created.
[root@rhel1 ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
data1 ora-vg -wi-a----- 1000.00m
root rhel -wi-ao---- <8.00g
swap rhel -wi-ao---- 1.00g
[root@rhel1 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
ora-vg 2 1 0 wz--n- 1016.00m 16.00m
rhel 1 2 0 wz--n- <9.00g 0
===> To create a filesystem on the LV
[root@rhel1 ~]# mkfs.ext4 /dev/mapper/ora--vg-data1
mke2fs 1.42.9 (28-Dec-2013)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
64000 inodes, 256000 blocks
12800 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=262144000
8 block groups
32768 blocks per group, 32768 fragments per group
8000 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376
Allocating group tables: done
Writing inode tables: done
Creating journal (4096 blocks): done
Writing superblocks and filesystem accounting information: done
===> To mount the filesystem
[root@rhel1 ~]# mkdir /data1
[root@rhel1 ~]# mount -t ext4 /dev/mapper/ora--vg-data1 /data1
[root@rhel1 ~]# df -h /data1
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/ora--vg-data1 969M 2.5M 900M 1% /data1
[root@rhel1 ~]#
====> To extend a filesystem (offline method)
1) Create the PV
2) Add the PV to the VG
3) Extend the LV, then resize the filesystem
#pvcreate /dev/sdd
#vgextend ora-vg /dev/sdd
#lvextend -L +100m /dev/mapper/ora--vg-data1
#umount /data1
#e2fsck -f /dev/mapper/ora--vg-data1   (required before an offline resize2fs)
[root@rhel1 mapper]# resize2fs /dev/mapper/ora--vg-data1
resize2fs 1.42.9 (28-Dec-2013)
Resizing the filesystem on /dev/mapper/ora--vg-data1 to 281600 (4k) blocks.
The filesystem on /dev/mapper/ora--vg-data1 is now 281600 blocks long.
[root@rhel1 mapper]# mount /dev/mapper/ora--vg-data1 /data1
[root@rhel1 mapper]# df -h /data1
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/ora--vg-data1 1.1G 2.5M 993M 1% /data1
Or, the online method (resizes LV and filesystem in one step, while mounted):
[root@rhel1 ~]# lvresize -L +100m -r /dev/mapper/ora--vg-data1
Size of logical volume ora-vg/data1 changed from 1.46 GiB (375 extents) to 1.56 GiB (400 extents).
Logical volume ora-vg/data1 successfully resized.
resize2fs 1.42.9 (28-Dec-2013)
Filesystem at /dev/mapper/ora--vg-data1 is mounted on /data1; on-line resizing required
old_desc_blocks = 1, new_desc_blocks = 1
The filesystem on /dev/mapper/ora--vg-data1 is now 409600 blocks long.
[root@rhel1 ~]# df -h /data1
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/ora--vg-data1 1.6G 3.0M 1.5G 1% /data1
=====
To run fsck (the filesystem must be unmounted first)
[root@rhel1 ~]# e2fsck -f /dev/mapper/ora--vg-data1
e2fsck 1.42.9 (28-Dec-2013)
Resize inode not valid. Recreate<y>? yes
Pass 1: Checking inodes, blocks, and sizes
Pass 2: Checking directory structure
Pass 3: Checking directory connectivity
Pass 4: Checking reference counts
Pass 5: Checking group summary information
Free blocks count wrong for group #0 (28117, counted=28118).
Fix<y>? yes
Free blocks count wrong (285661, counted=285662).
Fix<y>? yes
/dev/mapper/ora--vg-data1: ***** FILE SYSTEM WAS MODIFIED *****
/dev/mapper/ora--vg-data1: 11/72000 files (0.0% non-contiguous), 9250/294912 blocks
[root@rhel1 ~]# e2fsck -f /dev/mapper/ora--vg-data1
e2fsck 1.42.9 (28-Dec-2013)
Pass 1: Checking inodes, blocks, and sizes
Pass 2: Checking directory structure
Pass 3: Checking directory connectivity
Pass 4: Checking reference counts
Pass 5: Checking group summary information
/dev/mapper/ora--vg-data1: 11/72000 files (0.0% non-contiguous), 9250/294912 blocks
[root@rhel1 ~]# e2fsck -f /dev/mapper/ora--vg-data1
e2fsck 1.42.9 (28-Dec-2013)
Pass 1: Checking inodes, blocks, and sizes
Pass 2: Checking directory structure
Pass 3: Checking directory connectivity
Pass 4: Checking reference counts
Pass 5: Checking group summary information
/dev/mapper/ora--vg-data1: 11/72000 files (0.0% non-contiguous), 9250/294912 blocks
=========>
To export a VG (for moving it to another host)
[root@rhel1 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
ora-vg 4 1 0 wz--n- 1.98g 432.00m
rhel 1 2 0 wz--n- <9.00g 0
[root@rhel1 ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
data1 ora-vg -wi-ao---- 1.56g
root rhel -wi-ao---- <8.00g
swap rhel -wi-ao---- 1.00g
[root@rhel1 ~]#
[root@rhel1 ~]# df -h /data1
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/ora--vg-data1 1.6G 3.0M 1.5G 1% /data1
[root@rhel1 ~]#
[root@rhel1 ~]# umount /data1
[root@rhel1 ~]#
[root@rhel1 ~]# lvchange -an ora-vg
[root@rhel1 ~]#
[root@rhel1 ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
data1 ora-vg -wi------- 1.56g
root rhel -wi-ao---- <8.00g
swap rhel -wi-ao---- 1.00g
[root@rhel1 ~]#
[root@rhel1 ~]# vgchange -an ora-vg
0 logical volume(s) in volume group "ora-vg" now active
[root@rhel1 ~]#
[root@rhel1 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
ora-vg 4 1 0 wz--n- 1.98g 432.00m
rhel 1 2 0 wz--n- <9.00g 0
[root@rhel1 ~]#
[root@rhel1 ~]# vgexport ora-vg
Volume group "ora-vg" successfully exported
=> On the DR node, import the VG
[root@rhel2 ~]# vgimport ora-vg
Volume group "ora-vg" successfully imported
[root@rhel2 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
ora-vg 4 1 0 wz--n- 1.98g 432.00m
rhel 1 1 0 wz--n- 7.00g 0
[root@rhel2 ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
data1 ora-vg -wi------- 1.56g
root rhel -wi-ao---- 7.00g
[root@rhel2 ~]# vgchange -ay ora-vg
1 logical volume(s) in volume group "ora-vg" now active
[root@rhel2 ~]#
[root@rhel2 ~]# lvchange -ay ora-vg
[root@rhel2 ~]#
[root@rhel2 ~]#
[root@rhel2 ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
data1 ora-vg -wi-a----- 1.56g
root rhel -wi-ao---- 7.00g
[root@rhel2 ~]#
[root@rhel2 ~]#
[root@rhel2 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
ora-vg 4 1 0 wz--n- 1.98g 432.00m
rhel 1 1 0 wz--n- 7.00g 0
[root@rhel2 ~]#
[root@rhel2 ~]# mount /dev/mapper/ora--vg-data1 /mnt
[root@rhel2 mnt]# df -h /mnt
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/ora--vg-data1 1.6G 3.0M 1.5G 1% /mnt
=====
To restore a VG configuration from backup (vgcfgbackup writes to /etc/lvm/backup; older copies are kept in /etc/lvm/archive)
[root@rhel1 ~]# vgcfgbackup
Volume group "ora-vg" successfully backed up.
Volume group "rhel" successfully backed up.
[root@rhel1 archive]# vgremove ora-vg
Do you really want to remove volume group "ora-vg" containing 1 logical volumes? [y/n]: y
Do you really want to remove active logical volume ora-vg/data1? [y/n]: y
Logical volume "data1" successfully removed
Volume group "ora-vg" successfully removed
[root@rhel1 archive]# vgcfgrestore -f /etc/lvm/archive/ora-vg_00012-467063718.vg ora-vg
Restored volume group ora-vg
[root@rhel1 archive]# vgs
VG #PV #LV #SN Attr VSize VFree
ora-vg 4 1 0 wz--n- 1.98g 432.00m
rhel 1 2 0 wz--n- <9.00g 0
[root@rhel1 archive]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
data1 ora-vg -wi------- 1.56g
root rhel -wi-ao---- <8.00g
swap rhel -wi-ao---- 1.00g
======
[root@rhel1 mapper]# vgchange -ay ora-vg
1 logical volume(s) in volume group "ora-vg" now active
[root@rhel1 mapper]# lvchange -ay ora-vg
[root@rhel1 mapper]# mount /dev/mapper/ora--vg-data1 /mnt
[root@rhel1 mapper]# df -h /mnt
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/ora--vg-data1 1.6G 19M 1.5G 2% /mnt
[root@rhel1 mapper]#
===============
No comments:
Post a Comment