LVM

yoni · April 17, 2023


swap

  1. device swap <- the only kind used here
  • Partition the disk, then create the swap area on the partition with mkswap.
  • Then activate it with swapon.
  2. file swap (see the sketch below)
  3. pseudo swap
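
Only device swap is demonstrated in this post; for reference, a file swap would look roughly like this (the path /swapfile and the 512M size are assumptions, not from the transcript):

[root@localhost ~]# dd if=/dev/zero of=/swapfile bs=1M count=512   # create a 512M file to back the swap
[root@localhost ~]# chmod 600 /swapfile                            # swap files must not be readable by others
[root@localhost ~]# mkswap /swapfile
[root@localhost ~]# swapon /swapfile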
> VM
[root@localhost ~]# lsblk -f
NAME        FSTYPE      LABEL UUID                                   MOUNTPOINT
sda                                                                  
├─sda1      xfs               243550a7-03de-42be-a6f4-726a8ea2a6ce   /white
├─sda2                                                               
├─sda5      ext4              571a7ee6-2340-4337-8741-10aae94af0fb   /black
└─sda6                                                               
sdb                                                                  
└─sdb2                                                               
vda                                                                  
├─vda1      xfs               0cab009d-2c67-4dc9-a881-c5efde97ed56   /boot
└─vda2      LVM2_member       CFDwnt-NM58-A0Gj-sTQy-iCPE-EsRc-iqIAyb 
  ├─cl-root xfs               999a2a08-42a2-4cbc-a52c-3544d87d2897   /
  └─cl-swap swap              faab5b3c-984a-4a82-a139-92d1b706f013   [SWAP]

[root@localhost ~]# mkswap /dev/sdb2
Setting up swapspace version 1, size = 291836 KiB
no label, UUID=7fe3d0ac-3d51-4f17-868d-c3d428306179

[root@localhost ~]# lsblk -f
NAME        FSTYPE      LABEL UUID                                   MOUNTPOINT
sda                                                                  
├─sda1      xfs               243550a7-03de-42be-a6f4-726a8ea2a6ce   /white
├─sda2                                                               
├─sda5      ext4              571a7ee6-2340-4337-8741-10aae94af0fb   /black
└─sda6                                                               
sdb                                                                  
└─sdb2      swap              7fe3d0ac-3d51-4f17-868d-c3d428306179   
vda                                                                  
├─vda1      xfs               0cab009d-2c67-4dc9-a881-c5efde97ed56   /boot
└─vda2      LVM2_member       CFDwnt-NM58-A0Gj-sTQy-iCPE-EsRc-iqIAyb 
  ├─cl-root xfs               999a2a08-42a2-4cbc-a52c-3544d87d2897   /
  └─cl-swap swap              faab5b3c-984a-4a82-a139-92d1b706f013   [SWAP]

[root@localhost ~]# swapon -s
Filename				Type		Size	Used	Priority
/dev/dm-1                              	partition	2097148	0	-1
[root@localhost ~]# swapon /dev/sdb2 
[root@localhost ~]# swapon -s
Filename				Type		Size	Used	Priority
/dev/dm-1                              	partition	2097148	0	-1
/dev/sdb2                              	partition	291836	0	-2
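
> swapon lasts only until reboot; to make the swap persistent, the usual approach is an /etc/fstab entry like the sketch below (fstab is covered in detail later in this post):

/dev/sdb2               swap                    swap    defaults        0 0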



> Physical host
[root@station14 ~]# ssh yoni@192.168.122.108
The authenticity of host '192.168.122.108 (192.168.122.108)' can't be established.
ECDSA key fingerprint is d3:2b:d4:c6:8a:41:dd:d3:ea:00:87:33:f7:69:94:4e.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.122.108' (ECDSA) to the list of known hosts.
Welcome

yoni@192.168.122.108's password: 
Last login: Thu Apr 13 14:03:05 2023 from localhost
[yoni@localhost ~]$ cd /white
[yoni@localhost white]$ ll
total 0
[yoni@localhost white]$ 

> VM
[root@localhost ~]# umount /white
umount: /white: target is busy.
        (In some cases useful info about processes that use
         the device is found by lsof(8) or fuser(1))

[root@localhost ~]# fuser -cu /white
/white:               2496c(yoni)

[root@localhost ~]# ps -ef | grep 2496
yoni      2496  2495  0 10:11 pts/1    00:00:00 -bash
root      2588  2144  0 10:14 pts/0    00:00:00 grep --color=auto 2496

[root@localhost ~]# lsof /white/
COMMAND  PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
bash    2496 yoni  cwd    DIR    8,1        6   64 /white

> kill -9 PID, or fuser -kcu /white (kills the processes using it)
[root@localhost ~]# fuser -kcu /white
/white:               2496c(yoni)


> Verify after umount
[root@localhost ~]# umount /white
[root@localhost ~]# df -h
Filesystem           Size  Used Avail Use% Mounted on
/dev/mapper/cl-root   17G  4.7G   13G  28% /
devtmpfs             2.0G     0  2.0G   0% /dev
tmpfs                2.0G  144K  2.0G   1% /dev/shm
tmpfs                2.0G  8.9M  2.0G   1% /run
tmpfs                2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1           1014M  157M  858M  16% /boot
/dev/sda5            190M  1.6M  175M   1% /black
tmpfs                396M  4.0K  396M   1% /run/user/42
tmpfs                396M   16K  396M   1% /run/user/0


> Attaching labels
[root@localhost ~]# lsblk -f
NAME        FSTYPE      LABEL UUID                                   MOUNTPOINT
sda                                                                  
├─sda1      xfs               243550a7-03de-42be-a6f4-726a8ea2a6ce   
├─sda2                                                               
├─sda5      ext4              571a7ee6-2340-4337-8741-10aae94af0fb   /black
└─sda6                                                               
sdb                                                                  
└─sdb2      swap              7fe3d0ac-3d51-4f17-868d-c3d428306179   [SWAP]
vda                                                                  
├─vda1      xfs               0cab009d-2c67-4dc9-a881-c5efde97ed56   /boot
└─vda2      LVM2_member       CFDwnt-NM58-A0Gj-sTQy-iCPE-EsRc-iqIAyb 
  ├─cl-root xfs               999a2a08-42a2-4cbc-a52c-3544d87d2897   /
  └─cl-swap swap              faab5b3c-984a-4a82-a139-92d1b706f013   [SWAP]
  
[root@localhost ~]# xfs_admin -L white_disk /dev/sda1
writing all SBs
new label = "white_disk"
[root@localhost ~]# lsblk -f
NAME        FSTYPE      LABEL      UUID                                   MOUNTPOINT
sda                                                                       
├─sda1      xfs         white_disk 243550a7-03de-42be-a6f4-726a8ea2a6ce   
├─sda2                                                                    
├─sda5      ext4                   571a7ee6-2340-4337-8741-10aae94af0fb   /black
└─sda6                                                                    
sdb                                                                       
└─sdb2      swap                   7fe3d0ac-3d51-4f17-868d-c3d428306179   [SWAP]
vda                                                                       
├─vda1      xfs                    0cab009d-2c67-4dc9-a881-c5efde97ed56   /boot
└─vda2      LVM2_member            CFDwnt-NM58-A0Gj-sTQy-iCPE-EsRc-iqIAyb 
  ├─cl-root xfs                    999a2a08-42a2-4cbc-a52c-3544d87d2897   /
  └─cl-swap swap                   faab5b3c-984a-4a82-a139-92d1b706f013   [SWAP]
  
[root@localhost ~]# e2label /dev/sda5 b_disk
[root@localhost ~]# lsblk -f
NAME        FSTYPE      LABEL      UUID                                   MOUNTPOINT
sda                                                                       
├─sda1      xfs         white_disk 243550a7-03de-42be-a6f4-726a8ea2a6ce   
├─sda2                                                                    
├─sda5      ext4        b_disk     571a7ee6-2340-4337-8741-10aae94af0fb   /black
└─sda6                                                                    
sdb                                                                       
└─sdb2      swap                   7fe3d0ac-3d51-4f17-868d-c3d428306179   [SWAP]
vda                                                                       
├─vda1      xfs                    0cab009d-2c67-4dc9-a881-c5efde97ed56   /boot
└─vda2      LVM2_member            CFDwnt-NM58-A0Gj-sTQy-iCPE-EsRc-iqIAyb 
  ├─cl-root xfs                    999a2a08-42a2-4cbc-a52c-3544d87d2897   /
  └─cl-swap swap                   faab5b3c-984a-4a82-a139-92d1b706f013   [SWAP]

> Instead of mount <device> <directory>, you can mount by label: mount LABEL=<label> <directory>
[root@localhost ~]# mount LABEL=white_disk /white

[root@localhost ~]# df -h
Filesystem           Size  Used Avail Use% Mounted on
/dev/mapper/cl-root   17G  4.7G   13G  28% /
devtmpfs             2.0G     0  2.0G   0% /dev
tmpfs                2.0G  144K  2.0G   1% /dev/shm
tmpfs                2.0G  8.9M  2.0G   1% /run
tmpfs                2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1           1014M  157M  858M  16% /boot
/dev/sda5            190M  1.6M  175M   1% /black
tmpfs                396M  4.0K  396M   1% /run/user/42
tmpfs                396M   16K  396M   1% /run/user/0
/dev/sda1            297M   16M  282M   6% /white
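
> For reference, mounting by UUID works the same way, using the UUID shown by lsblk -f:
[root@localhost ~]# mount UUID=243550a7-03de-42be-a6f4-726a8ea2a6ce /white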


> Filesystem attributes
[root@localhost ~]# xfs_info /dev/sda1
meta-data=/dev/sda1              isize=512    agcount=4, agsize=19200 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0 spinodes=0
data     =                       bsize=4096   blocks=76800, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal               bsize=4096   blocks=855, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0


> tune2fs : filesystem tuning
– With -l, shows the superblock information of a given filesystem
– Prints essentially the same output as the dumpe2fs command

[root@localhost ~]# tune2fs -l /dev/sda5
tune2fs 1.42.9 (28-Dec-2013)
Filesystem volume name:   b_disk
Last mounted on:          <not available>
Filesystem UUID:          571a7ee6-2340-4337-8741-10aae94af0fb
Filesystem magic number:  0xEF53
Filesystem revision #:    1 (dynamic)
Filesystem features:      has_journal ext_attr resize_inode dir_index filetype needs_recovery extent 64bit flex_bg sparse_super huge_file uninit_bg dir_nlink extra_isize
Filesystem flags:         signed_directory_hash 
Default mount options:    user_xattr acl
Filesystem state:         clean
Errors behavior:          Continue
Filesystem OS type:       Linux
Inode count:              51200
Block count:              204800
Reserved block count:     10240
Free blocks:              192685
Free inodes:              51189
First block:              1
Block size:               1024
Fragment size:            1024
Group descriptor size:    64
Reserved GDT blocks:      256
Blocks per group:         8192
Fragments per group:      8192
Inodes per group:         2048
Inode blocks per group:   256
Flex block group size:    16
Filesystem created:       Fri Apr 14 16:44:01 2023
Last mount time:          Mon Apr 17 09:57:01 2023
Last write time:          Mon Apr 17 10:21:18 2023
Mount count:              4
Maximum mount count:      -1
Last checked:             Fri Apr 14 16:44:01 2023
Check interval:           0 (<none>)
Lifetime writes:          11 MB
Reserved blocks uid:      0 (user root)
Reserved blocks gid:      0 (group root)
First inode:              11
Inode size:	          128
Journal inode:            8
Default directory hash:   half_md4
Directory Hash Seed:      a6b9914f-0200-411a-9f6a-b621027224d4
Journal backup:           inode blocks


> Filesystem checks are possible only while unmounted
[root@localhost ~]# xfs_repair /dev/sda1
xfs_repair: /dev/sda1 contains a mounted filesystem
xfs_repair: /dev/sda1 contains a mounted and writable filesystem

fatal error -- couldn't initialize XFS library
[root@localhost ~]# umount /dev/sda1
[root@localhost ~]# xfs_repair /dev/sda1
Phase 1 - find and verify superblock...
Phase 2 - using internal log
        - zero log...
        - scan filesystem freespace and inode maps...
        - found root inode chunk
Phase 3 - for each AG...
        - scan and clear agi unlinked lists...
        - process known inodes and perform inode discovery...
        - agno = 0
        - agno = 1
        - agno = 2
        - agno = 3
        - process newly discovered inodes...
Phase 4 - check for duplicate blocks...
        - setting up duplicate extent list...
        - check for inodes claiming duplicate blocks...
        - agno = 0
        - agno = 1
        - agno = 2
        - agno = 3
Phase 5 - rebuild AG headers and trees...
        - reset superblock...
Phase 6 - check inode connectivity...
        - resetting contents of realtime bitmap and summary inodes
        - traversing filesystem ...
        - traversal finished ...
        - moving disconnected inodes to lost+found ...
Phase 7 - verify and correct link counts...
done
[root@localhost ~]# umount /dev/sda5
[root@localhost ~]# e2fsck /dev/sda5
e2fsck 1.42.9 (28-Dec-2013)
b_disk: clean, 11/51200 files, 12115/204800 blocks

> XFS filesystem commands
xfs_repair : check the filesystem
xfs_growfs : grow the filesystem

> Plain partitioning -> poor extensibility
Growing a partition means backing up, unmounting, destroying and recreating the partition (fdisk, parted), remounting, restoring the data, and so on...
=> Use LVM to manage disks flexibly instead!

LVM

LVM's key concepts (a minimal workflow sketch follows this list)

  • PV (Physical Volume)
    • One lump of clay
    • i.e. one physical chunk of disk
    • PVs can be gathered together into one big lump => a VG
  • VG (Volume Group)
    • A large lump built by combining PVs
  • LV (Logical Volume)
    • A disk area carved out of a VG, as large as you need
  • Extent
    • Like the score lines in a chocolate bar that let you snap off pieces
    • i.e. the minimum unit of LVM allocation and management
    • Default extent size: 4MB (a default from older disks; 2^n sizes are the norm today)
    • It is set when the VG (volume group) is created
    • Seen from the Logical Volume side: Logical Extents
    • Seen from the Physical Volume side: Physical Extents
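
A hedged sketch of the end-to-end flow these concepts describe (device names, sizes, and the VG/LV names here are illustrative; each step is demonstrated for real below):

[root@localhost ~]# pvcreate /dev/sda /dev/sdb        # turn raw disks into physical volumes
[root@localhost ~]# vgcreate myvg /dev/sda /dev/sdb   # combine the PVs into a volume group
[root@localhost ~]# lvcreate -L 160M -n mylv myvg     # carve a logical volume out of the VG
[root@localhost ~]# mkfs -t xfs /dev/myvg/mylv        # put a filesystem on the LV
[root@localhost ~]# mount /dev/myvg/mylv /mnt         # and mount it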
[root@localhost ~]# lsblk
NAME        MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda           8:0    0   1G  0 disk 
sdb           8:16   0   1G  0 disk 
sdc           8:32   0   1G  0 disk 
vda         252:0    0  20G  0 disk 
├─vda1      252:1    0   1G  0 part /boot
└─vda2      252:2    0  19G  0 part 
  ├─cl-root 253:0    0  17G  0 lvm  /
  └─cl-swap 253:1    0   2G  0 lvm  [SWAP]

[root@localhost ~]# pvs
  PV         VG Fmt  Attr PSize  PFree
  /dev/vda2  cl lvm2 a--  19.00g    0 

[root@localhost ~]# pvcreate /dev/sda /dev/sdb
  Physical volume "/dev/sda" successfully created.
  Physical volume "/dev/sdb" successfully created.

[root@localhost ~]# pvs
  PV         VG Fmt  Attr PSize  PFree
  /dev/sda      lvm2 ---   1.00g 1.00g
  /dev/sdb      lvm2 ---   1.00g 1.00g
  /dev/vda2  cl lvm2 a--  19.00g    0 

[root@localhost ~]# pvdisplay /dev/sda
  "/dev/sda" is a new physical volume of "1.00 GiB"
  --- NEW Physical volume ---
  PV Name               /dev/sda
  VG Name               
  PV Size               1.00 GiB
  Allocatable           NO
  PE Size               0   
  Total PE              0
  Free PE               0
  Allocated PE          0
  PV UUID               6vSBrl-LUJi-TyWx-jCH2-T0ND-DqHT-9sRLpq
   
   
> vgs : scan volume groups
[root@localhost ~]# vgs
  VG #PV #LV #SN Attr   VSize  VFree
  cl   1   2   0 wz--n- 19.00g    0 

> volume group create (build a VG named test out of the sda and sdb chunks)
[root@localhost ~]# vgcreate test /dev/sda /dev/sdb
  Volume group "test" successfully created
  
[root@localhost ~]# vgs
  VG   #PV #LV #SN Attr   VSize  VFree
  cl     1   2   0 wz--n- 19.00g    0 
  test   2   0   0 wz--n-  1.99g 1.99g
  
> Checking the sda PV shows it is now assigned to the test VG.
Extents are allocated when the PV is assigned to a VG; the default PE size of 4MB is visible here.
[root@localhost ~]# pvdisplay /dev/sda
  --- Physical volume ---
  PV Name               /dev/sda
  VG Name               test
  PV Size               1.00 GiB / not usable 4.00 MiB
  Allocatable           yes 
  PE Size               4.00 MiB
  Total PE              255
  Free PE               255
  Allocated PE          0
  PV UUID               6vSBrl-LUJi-TyWx-jCH2-T0ND-DqHT-9sRLpq
  
> Now delete the test VG
[root@localhost ~]# vgremove test
  Volume group "test" successfully removed
[root@localhost ~]# vgs
  VG #PV #LV #SN Attr   VSize  VFree
  cl   1   2   0 wz--n- 19.00g    0 

> Recreate a VG, named vgcolor this time, with a 16MB extent size.
[root@localhost ~]# vgcreate -s 16M vgcolor /dev/sda /dev/sdb
  Volume group "vgcolor" successfully created
  
[root@localhost ~]# vgs
  VG      #PV #LV #SN Attr   VSize  VFree
  cl        1   2   0 wz--n- 19.00g    0 
  vgcolor   2   0   0 wz--n-  1.97g 1.97g
  
[root@localhost ~]# pvdisplay /dev/sda
  --- Physical volume ---
  PV Name               /dev/sda
  VG Name               vgcolor
  PV Size               1.00 GiB / not usable 16.00 MiB
  Allocatable           yes 
  PE Size               16.00 MiB
  Total PE              63
  Free PE               63
  Allocated PE          0
  PV UUID               6vSBrl-LUJi-TyWx-jCH2-T0ND-DqHT-9sRLpq
   
> Print detailed information about the VG (volume group)
[root@localhost ~]# vgdisplay vgcolor
  --- Volume group ---
  VG Name               vgcolor
  System ID             
  Format                lvm2
  Metadata Areas        2
  Metadata Sequence No  1
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                0
  Open LV               0
  Max PV                0
  Cur PV                2
  Act PV                2
  VG Size               1.97 GiB
  PE Size               16.00 MiB
  Total PE              126
  Alloc PE / Size       0 / 0   
  Free  PE / Size       126 / 1.97 GiB
  VG UUID               g2HQ8f-DUvm-g3B0-k7ea-LgvE-bM9E-0ZnGQ4
   

> Now carve off as much as we want from the vgcolor VG to make a Logical Volume.

LV (logical volume)

  • lvcreate -L
    • Specify an explicit size, such as 850G
  • lvcreate -l
    • Accepts a percentage (make N% of the VG capacity into an LV)
    • Accepts a number of extents
    • (an explicit size such as 850G belongs with -L instead)

> Carve a Logical Volume named red out of the vgcolor VG.
[root@localhost ~]# lvcreate -L 150M -n red vgcolor
  Rounding up size to full physical extent 160.00 MiB
  Logical volume "red" created.
  
[root@localhost ~]# lvs
  LV   VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root cl      -wi-ao----  17.00g                                                    
  swap cl      -wi-ao----   2.00g                                                    
  red  vgcolor -wi-a----- 160.00m      -----this one!!
  
> 20 extents, so 16M * 20 = 320M
[root@localhost ~]# lvcreate -l 20 -n blue vgcolor
  Logical volume "blue" created.
  
[root@localhost ~]# lvs
  LV   VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root cl      -wi-ao----  17.00g                                                    
  swap cl      -wi-ao----   2.00g                                                    
  blue vgcolor -wi-a----- 320.00m      -----this one!!                                               
  red  vgcolor -wi-a----- 160.00m  
  
> Allocate an LV by percentage (20% of the VG's remaining free space)
[root@localhost ~]# lvcreate -l 20%FREE vgcolor
  Logical volume "lvol0" created.
  
[root@localhost ~]# lvs
  LV    VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root  cl      -wi-ao----  17.00g                                                    
  swap  cl      -wi-ao----   2.00g                                                    
  blue  vgcolor -wi-a----- 320.00m                                                    
  lvol0 vgcolor -wi-a----- 304.00m     -----this one!!                                               
  red   vgcolor -wi-a----- 160.00m  


Two ways of referencing a Logical Volume

1) computer : /dev/mapper/VGname-LVname
2) human : /dev/VGname/LVname

> Check detailed LV information with lvdisplay

> 1) [root@localhost ~]# lvdisplay /dev/mapper/vgcolor-red
  --- Logical volume ---
  LV Path                /dev/vgcolor/red
  LV Name                red
  VG Name                vgcolor
  LV UUID                zUlKwe-fLDP-oZrd-LrlW-NfR7-2l6Z-vAaaX5
  LV Write Access        read/write
  LV Creation host, time localhost.localdomain, 2023-04-17 11:48:06 +0900
  LV Status              available
  # open                 0
  LV Size                160.00 MiB
  Current LE             10
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:2
   
> 2) [root@localhost ~]# lvdisplay /dev/vgcolor/red
  --- Logical volume ---
  LV Path                /dev/vgcolor/red
  LV Name                red
  VG Name                vgcolor
  LV UUID                zUlKwe-fLDP-oZrd-LrlW-NfR7-2l6Z-vAaaX5
  LV Write Access        read/write
  LV Creation host, time localhost.localdomain, 2023-04-17 11:48:06 +0900
  LV Status              available
  # open                 0
  LV Size                160.00 MiB
  Current LE             10
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:2
   
[root@localhost ~]# cat /etc/fstab 

#
# /etc/fstab
# Created by anaconda on Thu Apr 13 10:19:58 2023
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=0cab009d-2c67-4dc9-a881-c5efde97ed56 /boot                   xfs     defaults        0 0
/dev/mapper/cl-swap     swap                    swap    defaults        0 0

/etc/fstab

fields

1. Block device or remote directory : the filesystem device name
2. Mount point (directory) : the directory where the filesystem will be mounted
3. Filesystem type (xfs | ext4 | swap) : the type of filesystem being mounted
4. Extra mount options (defaults) : filesystem attribute settings
5. dump flag (0|1) : 0 = do not back up; 1 = this filesystem gets backed up
6. fsck order (0|1|2) : whether to check the filesystem at boot
(0: no filesystem check at boot, 1: checked first as the root filesystem, 2: checked at boot after root, for non-root filesystems)

example

[root@localhost etc]# vi /etc/fstab
  #
  # /etc/fstab
  # Created by anaconda on Thu Apr 13 10:19:58 2023
  #
  # Accessible filesystems, by reference, are maintained under '/dev/disk'
  # See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
  #
  /dev/mapper/cl-root     /                       xfs     defaults        0 0
  UUID=0cab009d-2c67-4dc9-a881-c5efde97ed56 /boot                   xfs     defaults        0 0
  /dev/mapper/cl-swap     swap                    swap    defaults        0 0
  /dev/sda1               /white                  xfs     defaults        0 0
  /dev/sda5               /black                  ext4    defaults        0 1

Set up a filesystem and mount for each of the red, blue, and lvol0 LVs

[root@localhost ~]# mkfs -t xfs /dev/vgcolor/red
meta-data=/dev/vgcolor/red       isize=512    agcount=4, agsize=10240 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=40960, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=855, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0

[root@localhost ~]# mkfs -t ext4 /dev/vgcolor/blue
mke2fs 1.42.9 (28-Dec-2013)
Discarding device blocks: done                            
Filesystem label=
OS type: Linux
Block size=1024 (log=0)
Fragment size=1024 (log=0)
Stride=0 blocks, Stripe width=0 blocks
81920 inodes, 327680 blocks
16384 blocks (5.00%) reserved for the super user
First data block=1
Maximum filesystem blocks=33947648
40 block groups
8192 blocks per group, 8192 fragments per group
2048 inodes per group
Superblock backups stored on blocks: 
	8193, 24577, 40961, 57345, 73729, 204801, 221185

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done 

[root@localhost ~]# mkswap /dev/vgcolor/lvol0
Setting up swapspace version 1, size = 311292 KiB
no label, UUID=797747f9-0b22-44a5-a387-508153880d81



[root@localhost ~]# lsblk -f
NAME            FSTYPE      LABEL UUID                                   MOUNTPOINT
sda             LVM2_member       6vSBrl-LUJi-TyWx-jCH2-T0ND-DqHT-9sRLpq 
├─vgcolor-red   xfs               94645283-d87d-498c-ad3a-9254c2164e91   
├─vgcolor-blue  ext4              9a5d046e-6d37-4af4-baaf-352f5f113f62   
└─vgcolor-lvol0 swap              797747f9-0b22-44a5-a387-508153880d81   
sdb             LVM2_member       52XEkN-oWZY-anS8-W323-oPEd-0rnn-v0IX0O 
sdc                                                                      
vda                                                                      
├─vda1          xfs               0cab009d-2c67-4dc9-a881-c5efde97ed56   /boot
└─vda2          LVM2_member       CFDwnt-NM58-A0Gj-sTQy-iCPE-EsRc-iqIAyb 
  ├─cl-root     xfs               999a2a08-42a2-4cbc-a52c-3544d87d2897   /
  └─cl-swap     swap              faab5b3c-984a-4a82-a139-92d1b706f013   [SWAP]
  
  
  
[root@localhost ~]# vi /etc/fstab   
# /etc/fstab
# Created by anaconda on Thu Apr 13 10:19:58 2023
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=0cab009d-2c67-4dc9-a881-c5efde97ed56 /boot                   xfs     defaults        0 0
/dev/mapper/cl-swap     swap                    swap    defaults        0 0
/dev/mapper/vgcolor-red      /red               xfs     defaults        0 0
/dev/mapper/vgcolor-blue     /blue              ext4    defaults        0 0
/dev/mapper/vgcolor-lvol0    swap               swap    defaults        0 0


> Equivalently, using the human-readable /dev/VGname/LVname form:

# /etc/fstab
# Created by anaconda on Thu Apr 13 10:19:58 2023
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/cl-root     /                       xfs     defaults        0 0
UUID=0cab009d-2c67-4dc9-a881-c5efde97ed56 /boot                   xfs     defaults        0 0
/dev/mapper/cl-swap     swap                    swap    defaults        0 0
/dev/vgcolor/red       /red             xfs     defaults        0 0
/dev/vgcolor/blue      /blue            ext4    defaults        0 0
/dev/vgcolor/lvol0     swap             swap    defaults        0 0


> Create the mount directories
[root@localhost ~]# mkdir /red /blue

> Mount everything in fstab, including /red and /blue
[root@localhost ~]# mount -a

> Verify the mounts
[root@localhost ~]# df -h
Filesystem                Size  Used Avail Use% Mounted on
/dev/mapper/cl-root        17G  3.8G   14G  23% /
devtmpfs                  2.0G     0  2.0G   0% /dev
tmpfs                     2.0G  144K  2.0G   1% /dev/shm
tmpfs                     2.0G  8.9M  2.0G   1% /run
tmpfs                     2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1                1014M  157M  858M  16% /boot
tmpfs                     396M  4.0K  396M   1% /run/user/42
tmpfs                     396M  8.0K  396M   1% /run/user/0
/dev/mapper/vgcolor-red   157M  8.2M  149M   6% /red   --------this one!!
/dev/mapper/vgcolor-blue  302M  2.1M  280M   1% /blue  --------this one!!

> Activate the new swap
[root@localhost ~]# swapon -a
[root@localhost ~]# swapon -s
Filename				Type		Size	Used	Priority
/dev/dm-1                              	partition	2097148	0	-1
/dev/dm-4                              	partition	311292	0	-2

What if the Volume Group runs out of free space?

> Fill /red with big files: 12 blocks of size 10M, generated randomly from the /dev/urandom random source.
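
The exact command isn't shown in the transcript; a plausible reconstruction (the file name bigfile is an assumption):

[root@localhost ~]# dd if=/dev/urandom of=/red/bigfile bs=10M count=12   # 12 x 10M = 120M, matching the ~129M used below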

> But what if there is not enough free space?
[root@localhost ~]# df -h
Filesystem                Size  Used Avail Use% Mounted on
/dev/mapper/cl-root        17G  3.8G   14G  23% /
devtmpfs                  2.0G     0  2.0G   0% /dev
tmpfs                     2.0G  144K  2.0G   1% /dev/shm
tmpfs                     2.0G  8.9M  2.0G   1% /run
tmpfs                     2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1                1014M  157M  858M  16% /boot
tmpfs                     396M  4.0K  396M   1% /run/user/42
tmpfs                     396M   12K  396M   1% /run/user/0
/dev/mapper/vgcolor-red   157M  129M   29M  82% /red
/dev/mapper/vgcolor-blue  302M  2.1M  280M   1% /blue


> If the Volume Group has enough free space, just carve some off and hand it over.
But if it does not, attach (add) another Physical Volume to the existing Volume Group.


> The vgcolor VG still shows 1.20g free (VFree), but assume that is not enough and grow it.
[root@localhost ~]# vgs
  VG      #PV #LV #SN Attr   VSize  VFree
  cl        1   2   0 wz--n- 19.00g    0 
  vgcolor   2   3   0 wz--n-  1.97g 1.20g


> Check the PVs, then create the sdc PV
[root@localhost ~]# pvs
  PV         VG      Fmt  Attr PSize    PFree   
  /dev/sda   vgcolor lvm2 a--  1008.00m  224.00m
  /dev/sdb   vgcolor lvm2 a--  1008.00m 1008.00m
  /dev/vda2  cl      lvm2 a--    19.00g       0 
  
[root@localhost ~]# pvcreate /dev/sdc
  Physical volume "/dev/sdc" successfully created.
[root@localhost ~]# pvs
  PV         VG      Fmt  Attr PSize    PFree   
  /dev/sda   vgcolor lvm2 a--  1008.00m  224.00m
  /dev/sdb   vgcolor lvm2 a--  1008.00m 1008.00m
  /dev/sdc           lvm2 ---     1.00g    1.00g
  /dev/vda2  cl      lvm2 a--    19.00g       0 

> Extend the vgcolor VG (using sdc)
  
[root@localhost ~]# vgextend vgcolor /dev/sdc
  Volume group "vgcolor" successfully extended
  
[root@localhost ~]# pvs
  PV         VG      Fmt  Attr PSize    PFree   
  /dev/sda   vgcolor lvm2 a--  1008.00m  224.00m
  /dev/sdb   vgcolor lvm2 a--  1008.00m 1008.00m
  /dev/sdc   vgcolor lvm2 a--  1008.00m 1008.00m
  /dev/vda2  cl      lvm2 a--    19.00g       0 
  
[root@localhost ~]# vgs
  VG      #PV #LV #SN Attr   VSize  VFree
  cl        1   2   0 wz--n- 19.00g    0 
  vgcolor   3   3   0 wz--n-  2.95g 2.19g

> Now grow the LVs. (Growable by size (-L) or by extent count (-l).)
[root@localhost ~]# lvs
  LV    VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root  cl      -wi-ao----  17.00g                                                    
  swap  cl      -wi-ao----   2.00g                                                    
  blue  vgcolor -wi-ao---- 320.00m                                                    
  lvol0 vgcolor -wi-ao---- 304.00m                                                    
  red   vgcolor -wi-ao---- 160.00m 
  
> Caution: you specify the final resulting size or final extent count, not the increment.

> Grow the red LV to 640M (-L option), or equivalently lvextend -l 40 /dev/vgcolor/red (40 * 16M)
[root@localhost ~]# lvextend -L 640M /dev/vgcolor/red
  Size of logical volume vgcolor/red changed from 160.00 MiB (10 extents) to 640.00 MiB (40 extents).
  Logical volume vgcolor/red successfully resized.
  
[root@localhost ~]# lvs
  LV    VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root  cl      -wi-ao----  17.00g                                                    
  swap  cl      -wi-ao----   2.00g                                                    
  blue  vgcolor -wi-ao---- 320.00m                                                    
  lvol0 vgcolor -wi-ao---- 304.00m                                                    
  red   vgcolor -wi-ao---- 640.00m      
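
lvextend also accepts relative sizes with a + prefix, so the same growth could have been requested incrementally; a sketch of the two equivalent alternatives (either one, not both; not from the transcript):

[root@localhost ~]# lvextend -L +480M /dev/vgcolor/red   # 160M + 480M = 640M
[root@localhost ~]# lvextend -l +30 /dev/vgcolor/red     # or by extents: 10 + 30 = 40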
  

> Next, grow the blue LV.
[root@localhost ~]# lvextend -l 40 /dev/vgcolor/blue
  Size of logical volume vgcolor/blue changed from 320.00 MiB (20 extents) to 640.00 MiB (40 extents).
  Logical volume vgcolor/blue successfully resized.
  
[root@localhost ~]# lvs
  LV    VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root  cl      -wi-ao----  17.00g                                                    
  swap  cl      -wi-ao----   2.00g                                                    
  blue  vgcolor -wi-ao---- 640.00m                                                    
  lvol0 vgcolor -wi-ao---- 304.00m                                                    
  red   vgcolor -wi-ao---- 640.00m  
  
  
> Check df -h: only the logical volume was grown, not the filesystem, so the numbers are unchanged. (A bigger logical volume does not by itself store more data; the filesystem must be grown to match.)
[root@localhost ~]# df -h
Filesystem                Size  Used Avail Use% Mounted on
/dev/mapper/cl-root        17G  3.9G   14G  23% /
devtmpfs                  2.0G     0  2.0G   0% /dev
tmpfs                     2.0G  144K  2.0G   1% /dev/shm
tmpfs                     2.0G  8.9M  2.0G   1% /run
tmpfs                     2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1                1014M  157M  858M  16% /boot
tmpfs                     396M  4.0K  396M   1% /run/user/42
tmpfs                     396M   12K  396M   1% /run/user/0
/dev/mapper/vgcolor-red   157M  129M   29M  82% /red    ----this check!!
/dev/mapper/vgcolor-blue  302M  2.1M  280M   1% /blue



> Grow the xfs filesystem (/red)
[root@localhost ~]# xfs_growfs /dev/vgcolor/red
meta-data=/dev/mapper/vgcolor-red isize=512    agcount=4, agsize=10240 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0 spinodes=0
data     =                       bsize=4096   blocks=40960, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal               bsize=4096   blocks=855, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
data blocks changed from 40960 to 163840

> Grow the ext4 filesystem (/blue)
[root@localhost ~]# resize2fs /dev/vgcolor/blue
resize2fs 1.42.9 (28-Dec-2013)
Filesystem at /dev/vgcolor/blue is mounted on /blue; on-line resizing required
old_desc_blocks = 3, new_desc_blocks = 5
The filesystem on /dev/vgcolor/blue is now 655360 blocks long.

> Filesystem capacity can be grown while still mounted -> confirm the new sizes with df -h
[root@localhost ~]# df -h
Filesystem                Size  Used Avail Use% Mounted on
/dev/mapper/cl-root        17G  3.9G   14G  23% /
devtmpfs                  2.0G     0  2.0G   0% /dev
tmpfs                     2.0G  144K  2.0G   1% /dev/shm
tmpfs                     2.0G  8.9M  2.0G   1% /run
tmpfs                     2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1                1014M  157M  858M  16% /boot
tmpfs                     396M  4.0K  396M   1% /run/user/42
tmpfs                     396M   12K  396M   1% /run/user/0
/dev/mapper/vgcolor-red   637M  129M  509M  21% /red        ----this check!!
/dev/mapper/vgcolor-blue  612M  2.3M  577M   1% /blue       ----this check!!

Conversely, what if you want to shrink a filesystem?


> First, unmount
[root@localhost ~]# umount /blue

> Confirm it is unmounted
[root@localhost ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/cl-root       17G  3.9G   14G  23% /
devtmpfs                 2.0G     0  2.0G   0% /dev
tmpfs                    2.0G  144K  2.0G   1% /dev/shm
tmpfs                    2.0G  8.9M  2.0G   1% /run
tmpfs                    2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1               1014M  157M  858M  16% /boot
tmpfs                    396M  4.0K  396M   1% /run/user/42
tmpfs                    396M   12K  396M   1% /run/user/0
/dev/mapper/vgcolor-red  637M  129M  509M  21% /red


> On resize, it insists that the filesystem first be force-checked for problems.
[root@localhost ~]# resize2fs /dev/vgcolor/blue 480M
resize2fs 1.42.9 (28-Dec-2013)
Please run 'e2fsck -f /dev/vgcolor/blue' first.

[root@localhost ~]# e2fsck -f /dev/vgcolor/blue
e2fsck 1.42.9 (28-Dec-2013)
Pass 1: Checking inodes, blocks, and sizes
Pass 2: Checking directory structure
Pass 3: Checking directory connectivity
Pass 4: Checking reference counts
Pass 5: Checking group summary information
/dev/vgcolor/blue: 11/163840 files (0.0% non-contiguous), 31187/655360 blocks

> With the check done, the shrink to 480M succeeds
[root@localhost ~]# resize2fs /dev/vgcolor/blue 480M
resize2fs 1.42.9 (28-Dec-2013)
Resizing the filesystem on /dev/vgcolor/blue to 491520 (1k) blocks.
The filesystem on /dev/vgcolor/blue is now 491520 blocks long.

[root@localhost ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/cl-root       17G  3.9G   14G  23% /
devtmpfs                 2.0G     0  2.0G   0% /dev
tmpfs                    2.0G  144K  2.0G   1% /dev/shm
tmpfs                    2.0G  8.9M  2.0G   1% /run
tmpfs                    2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1               1014M  157M  858M  16% /boot
tmpfs                    396M  4.0K  396M   1% /run/user/42
tmpfs                    396M   12K  396M   1% /run/user/0
/dev/mapper/vgcolor-red  637M  129M  509M  21% /red

[root@localhost ~]# lvs
  LV    VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root  cl      -wi-ao----  17.00g                                                    
  swap  cl      -wi-ao----   2.00g                                                    
  blue  vgcolor -wi-a----- 640.00m                                                    
  lvol0 vgcolor -wi-ao---- 304.00m                                                    
  red   vgcolor -wi-ao---- 640.00m                                                    
  
> Shrink the blue LV to match
[root@localhost ~]# lvreduce -L 480M /dev/vgcolor/blue
  WARNING: Reducing active logical volume to 480.00 MiB.
  THIS MAY DESTROY YOUR DATA (filesystem etc.)
Do you really want to reduce vgcolor/blue? [y/n]: y
  Size of logical volume vgcolor/blue changed from 640.00 MiB (40 extents) to 480.00 MiB (30 extents).
  Logical volume vgcolor/blue successfully resized.
  
[root@localhost ~]# lvs
  LV    VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root  cl      -wi-ao----  17.00g                                                    
  swap  cl      -wi-ao----   2.00g                                                    
  blue  vgcolor -wi-a----- 480.00m                                                    
  lvol0 vgcolor -wi-ao---- 304.00m                                                    
  red   vgcolor -wi-ao---- 640.00m        


[root@localhost ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/cl-root       17G  3.9G   14G  23% /
devtmpfs                 2.0G     0  2.0G   0% /dev
tmpfs                    2.0G  144K  2.0G   1% /dev/shm
tmpfs                    2.0G  8.9M  2.0G   1% /run
tmpfs                    2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1               1014M  157M  858M  16% /boot
tmpfs                    396M  4.0K  396M   1% /run/user/42
tmpfs                    396M   12K  396M   1% /run/user/0
/dev/mapper/vgcolor-red  637M  129M  509M  21% /red


> Mount /blue again
[root@localhost ~]# mount /blue

> Confirm with df -h that the filesystem capacity has shrunk!
[root@localhost ~]# df -h
Filesystem                Size  Used Avail Use% Mounted on
/dev/mapper/cl-root        17G  3.9G   14G  23% /
devtmpfs                  2.0G     0  2.0G   0% /dev
tmpfs                     2.0G  144K  2.0G   1% /dev/shm
tmpfs                     2.0G  8.9M  2.0G   1% /run
tmpfs                     2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1                1014M  157M  858M  16% /boot
tmpfs                     396M  4.0K  396M   1% /run/user/42
tmpfs                     396M   12K  396M   1% /run/user/0
/dev/mapper/vgcolor-red   637M  129M  509M  21% /red
/dev/mapper/vgcolor-blue  457M  2.3M  429M   1% /blue    --this check!!
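
The shrink above took four manual steps (umount, e2fsck -f, resize2fs, lvreduce). lvreduce can also drive the filesystem resize itself via its -r/--resizefs flag; after unmounting, a one-step sketch (not from the transcript):

[root@localhost ~]# lvreduce -r -L 480M /dev/vgcolor/blue   # checks and shrinks the filesystem, then reduces the LV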


> These files hold each VG's metadata. If a volume group (VG) ever breaks, it can be restored by consulting the matching file!
[root@localhost ~]# ls /etc/lvm/backup/
cl  vgcolor

[root@localhost ~]# cat /etc/lvm/backup/vgcolor
# Generated by LVM2 version 2.02.166(2)-RHEL7 (2016-11-16): Mon Apr 17 13:59:32 2023

contents = "Text Format Volume Group"
version = 1

description = "Created *after* executing 'lvreduce -L 480M /dev/vgcolor/blue'"

creation_host = "localhost.localdomain"	# Linux localhost.localdomain 3.10.0-514.26.1.el7.x86_64 #1 SMP Thu Jun 29 16:05:25 UTC 2017 x86_64
creation_time = 1681707572	# Mon Apr 17 13:59:32 2023

vgcolor {
	id = "g2HQ8f-DUvm-g3B0-k7ea-LgvE-bM9E-0ZnGQ4"
	seqno = 8
	format = "lvm2"			# informational
	status = ["RESIZEABLE", "READ", "WRITE"]
	flags = []
	extent_size = 32768		# 16 Megabytes
	max_lv = 0
	max_pv = 0
	metadata_copies = 0

	physical_volumes {

		pv0 {
			id = "6vSBrl-LUJi-TyWx-jCH2-T0ND-DqHT-9sRLpq"
			device = "/dev/sda"	# Hint only

			status = ["ALLOCATABLE"]
			flags = []
			dev_size = 2097152	# 1024 Megabytes
			pe_start = 2048
			pe_count = 63	# 1008 Megabytes
		}

		pv1 {
			id = "52XEkN-oWZY-anS8-W323-oPEd-0rnn-v0IX0O"
			device = "/dev/sdb"	# Hint only

			status = ["ALLOCATABLE"]
			flags = []
			dev_size = 2097152	# 1024 Megabytes
			pe_start = 2048
			pe_count = 63	# 1008 Megabytes
		}

		pv2 {
			id = "k7k20f-BqVU-Luyx-Vt9e-8Jao-YsDm-KuHc0P"
			device = "/dev/sdc"	# Hint only

			status = ["ALLOCATABLE"]
			flags = []
			dev_size = 2097152	# 1024 Megabytes
			pe_start = 2048
			pe_count = 63	# 1008 Megabytes
		}
	}

	logical_volumes {

		red {
			id = "zUlKwe-fLDP-oZrd-LrlW-NfR7-2l6Z-vAaaX5"
			status = ["READ", "WRITE", "VISIBLE"]
			flags = []
			creation_time = 1681699686	# 2023-04-17 11:48:06 +0900
			creation_host = "localhost.localdomain"
			segment_count = 3

			segment1 {
				start_extent = 0
				extent_count = 10	# 160 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv0", 0
				]
			}
			segment2 {
				start_extent = 10
				extent_count = 14	# 224 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv0", 49
				]
			}
			segment3 {
				start_extent = 24
				extent_count = 16	# 256 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv1", 0
				]
			}
		}

		blue {
			id = "XRtCnu-fRAI-XkfL-f4fm-OsYy-UTNR-p8Ul7u"
			status = ["READ", "WRITE", "VISIBLE"]
			flags = []
			creation_time = 1681699719	# 2023-04-17 11:48:39 +0900
			creation_host = "localhost.localdomain"
			segment_count = 2

			segment1 {
				start_extent = 0
				extent_count = 20	# 320 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv0", 10
				]
			}
			segment2 {
				start_extent = 20
				extent_count = 10	# 160 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv1", 16
				]
			}
		}

		lvol0 {
			id = "3sZq1c-3xHR-VdKv-l9a2-E4AX-VaG1-PvJN4D"
			status = ["READ", "WRITE", "VISIBLE"]
			flags = []
			creation_time = 1681699844	# 2023-04-17 11:50:44 +0900
			creation_host = "localhost.localdomain"
			segment_count = 1

			segment1 {
				start_extent = 0
				extent_count = 19	# 304 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv0", 30
				]
			}
		}
	}

}


> Running vgcfgbackup with no arguments -> backs up to /etc/lvm/backup by default.
[root@localhost ~]# vgcfgbackup
  Volume group "cl" successfully backed up.
  Volume group "vgcolor" successfully backed up.
  
> vgcfgbackup can also be pointed at a specific backup file

[root@localhost ~]# vgcfgbackup -f /tmp/vg.back vgcolor
  Volume group "vgcolor" successfully backed up.
[root@localhost ~]# cat /tmp/vg.back
# Generated by LVM2 version 2.02.166(2)-RHEL7 (2016-11-16): Mon Apr 17 14:07:10 2023

contents = "Text Format Volume Group"
version = 1

description = "vgcfgbackup -f /tmp/vg.back vgcolor"

creation_host = "localhost.localdomain"	# Linux localhost.localdomain 3.10.0-514.26.1.el7.x86_64 #1 SMP Thu Jun 29 16:05:25 UTC 2017 x86_64
creation_time = 1681708030	# Mon Apr 17 14:07:10 2023

vgcolor {
	id = "g2HQ8f-DUvm-g3B0-k7ea-LgvE-bM9E-0ZnGQ4"
	seqno = 8
	format = "lvm2"			# informational
	status = ["RESIZEABLE", "READ", "WRITE"]
	flags = []
	extent_size = 32768		# 16 Megabytes
	max_lv = 0
	max_pv = 0
	metadata_copies = 0

	physical_volumes {

		pv0 {
			id = "6vSBrl-LUJi-TyWx-jCH2-T0ND-DqHT-9sRLpq"
			device = "/dev/sda"	# Hint only

			status = ["ALLOCATABLE"]
			flags = []
			dev_size = 2097152	# 1024 Megabytes
			pe_start = 2048
			pe_count = 63	# 1008 Megabytes
		}

		pv1 {
			id = "52XEkN-oWZY-anS8-W323-oPEd-0rnn-v0IX0O"
			device = "/dev/sdb"	# Hint only

			status = ["ALLOCATABLE"]
			flags = []
			dev_size = 2097152	# 1024 Megabytes
			pe_start = 2048
			pe_count = 63	# 1008 Megabytes
		}

		pv2 {
			id = "k7k20f-BqVU-Luyx-Vt9e-8Jao-YsDm-KuHc0P"
			device = "/dev/sdc"	# Hint only

			status = ["ALLOCATABLE"]
			flags = []
			dev_size = 2097152	# 1024 Megabytes
			pe_start = 2048
			pe_count = 63	# 1008 Megabytes
		}
	}

	logical_volumes {

		red {
			id = "zUlKwe-fLDP-oZrd-LrlW-NfR7-2l6Z-vAaaX5"
			status = ["READ", "WRITE", "VISIBLE"]
			flags = []
			creation_time = 1681699686	# 2023-04-17 11:48:06 +0900
			creation_host = "localhost.localdomain"
			segment_count = 3

			segment1 {
				start_extent = 0
				extent_count = 10	# 160 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv0", 0
				]
			}
			segment2 {
				start_extent = 10
				extent_count = 14	# 224 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv0", 49
				]
			}
			segment3 {
				start_extent = 24
				extent_count = 16	# 256 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv1", 0
				]
			}
		}

		blue {
			id = "XRtCnu-fRAI-XkfL-f4fm-OsYy-UTNR-p8Ul7u"
			status = ["READ", "WRITE", "VISIBLE"]
			flags = []
			creation_time = 1681699719	# 2023-04-17 11:48:39 +0900
			creation_host = "localhost.localdomain"
			segment_count = 2

			segment1 {
				start_extent = 0
				extent_count = 20	# 320 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv0", 10
				]
			}
			segment2 {
				start_extent = 20
				extent_count = 10	# 160 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv1", 16
				]
			}
		}

		lvol0 {
			id = "3sZq1c-3xHR-VdKv-l9a2-E4AX-VaG1-PvJN4D"
			status = ["READ", "WRITE", "VISIBLE"]
			flags = []
			creation_time = 1681699844	# 2023-04-17 11:50:44 +0900
			creation_host = "localhost.localdomain"
			segment_count = 1

			segment1 {
				start_extent = 0
				extent_count = 19	# 304 Megabytes

				type = "striped"
				stripe_count = 1	# linear

				stripes = [
					"pv0", 30
				]
			}
		}
	}

}


> Restore from the backup
[root@localhost ~]# vgcfgrestore -f /tmp/vg.back vgcolor
  Restored volume group vgcolor

> Verify the restored VG
[root@localhost ~]# vgdisplay vgcolor
  --- Volume group ---
  VG Name               vgcolor
  System ID             
  Format                lvm2
  Metadata Areas        3
  Metadata Sequence No  9
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                3
  Open LV               3
  Max PV                0
  Cur PV                3
  Act PV                3
  VG Size               2.95 GiB
  PE Size               16.00 MiB
  Total PE              189
  Alloc PE / Size       89 / 1.39 GiB
  Free  PE / Size       100 / 1.56 GiB
  VG UUID               g2HQ8f-DUvm-g3B0-k7ea-LgvE-bM9E-0ZnGQ4
   

Moving Volumes Between Systems

  • Process for migrating disks to a new system (old -> new)

  On the old system:
  1. umount /directory (unmount)
  2. vgchange -a n VGname (deactivate the volume group)
  3. vgexport VGname (export the VG, handing it off)
  4. poweroff

  On the new system:
  1. Attach the disks
  2. power on
  3. pvscan (scans physical volumes -> each gets classified by the Volume Group it belongs to)
  4. vgimport VGname (the volume group is imported)
  5. vgchange -a y VGname (the VG is still inactive at this point, so it must be activated)
[root@localhost ~]# df -h
Filesystem                Size  Used Avail Use% Mounted on
/dev/mapper/cl-root        17G  3.9G   14G  23% /
devtmpfs                  2.0G     0  2.0G   0% /dev
tmpfs                     2.0G  144K  2.0G   1% /dev/shm
tmpfs                     2.0G  8.9M  2.0G   1% /run
tmpfs                     2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1                1014M  157M  858M  16% /boot
tmpfs                     396M  4.0K  396M   1% /run/user/42
tmpfs                     396M   12K  396M   1% /run/user/0
/dev/mapper/vgcolor-red   637M  129M  509M  21% /red
/dev/mapper/vgcolor-blue  457M  2.3M  429M   1% /blue

[root@localhost ~]# umount /red /blue

[root@localhost ~]# df -h
Filesystem           Size  Used Avail Use% Mounted on
/dev/mapper/cl-root   17G  3.9G   14G  23% /
devtmpfs             2.0G     0  2.0G   0% /dev
tmpfs                2.0G  144K  2.0G   1% /dev/shm
tmpfs                2.0G  8.9M  2.0G   1% /run
tmpfs                2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1           1014M  157M  858M  16% /boot
tmpfs                396M  4.0K  396M   1% /run/user/42
tmpfs                396M   12K  396M   1% /run/user/0

[root@localhost ~]# swapoff /dev/vgcolor/lvol0


[root@localhost ~]# vgchange -a n vgcolor
  0 logical volume(s) in volume group "vgcolor" now active
[root@localhost ~]# lvs
  LV    VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root  cl      -wi-ao----  17.00g                                                    
  swap  cl      -wi-ao----   2.00g                                                    
  blue  vgcolor -wi------- 480.00m                                                    
  lvol0 vgcolor -wi------- 304.00m                                                    
  red   vgcolor -wi------- 640.00m                                                    
[root@localhost ~]# lvdisplay /dev/vgcolor/red
  --- Logical volume ---
  LV Path                /dev/vgcolor/red
  LV Name                red
  VG Name                vgcolor
  LV UUID                zUlKwe-fLDP-oZrd-LrlW-NfR7-2l6Z-vAaaX5
  LV Write Access        read/write
  LV Creation host, time localhost.localdomain, 2023-04-17 11:48:06 +0900
  LV Status              NOT available   ---this check!!!
  LV Size                640.00 MiB
  Current LE             40
  Segments               3
  Allocation             inherit
  Read ahead sectors     auto
   
[root@localhost ~]# vgexport vgcolor
  Volume group "vgcolor" successfully exported

> An x appears in Attr (exported)
[root@localhost ~]# pvs
  PV         VG      Fmt  Attr PSize    PFree   
  /dev/sda   vgcolor lvm2 ax-  1008.00m       0      ---Attr this check!!!!
  /dev/sdb   vgcolor lvm2 ax-  1008.00m  592.00m	 ---Attr this check!!!!
  /dev/sdc   vgcolor lvm2 ax-  1008.00m 1008.00m	 ---Attr this check!!!!
  /dev/vda2  cl      lvm2 a--    19.00g       0 
  
> Assume the machine was powered off and the disks were moved to a new host

> pvscan
[root@localhost ~]# pvscan   
  PV /dev/vda2   VG cl              lvm2 [19.00 GiB / 0    free]
  PV /dev/sda     is in exported VG vgcolor [1008.00 MiB / 0    free]
  PV /dev/sdb     is in exported VG vgcolor [1008.00 MiB / 592.00 MiB free]
  PV /dev/sdc     is in exported VG vgcolor [1008.00 MiB / 1008.00 MiB free]
  Total: 4 [21.95 GiB] / in use: 4 [21.95 GiB] / in no VG: 0 [0   ]

> Run vgimport
[root@localhost ~]# vgimport vgcolor
  Volume group "vgcolor" successfully imported
  
> No longer exported (the x is gone from Attr)
[root@localhost ~]# pvs
  PV         VG      Fmt  Attr PSize    PFree   
  /dev/sda   vgcolor lvm2 a--  1008.00m       0 
  /dev/sdb   vgcolor lvm2 a--  1008.00m  592.00m
  /dev/sdc   vgcolor lvm2 a--  1008.00m 1008.00m
  /dev/vda2  cl      lvm2 a--    19.00g       0 

[root@localhost ~]# vgdisplay vgcolor
  --- Volume group ---
  VG Name               vgcolor
  System ID             
  Format                lvm2
  Metadata Areas        3
  Metadata Sequence No  11
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                3
  Open LV               0
  Max PV                0
  Cur PV                3
  Act PV                3
  VG Size               2.95 GiB
  PE Size               16.00 MiB
  Total PE              189
  Alloc PE / Size       89 / 1.39 GiB
  Free  PE / Size       100 / 1.56 GiB
  VG UUID               g2HQ8f-DUvm-g3B0-k7ea-LgvE-bM9E-0ZnGQ4


> The Logical Volumes are still not activated.
[root@localhost ~]# lvs
  LV    VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root  cl      -wi-ao----  17.00g                                                    
  swap  cl      -wi-ao----   2.00g                                                    
  blue  vgcolor -wi------- 480.00m                                                    
  lvol0 vgcolor -wi------- 304.00m                                                    
  red   vgcolor -wi------- 640.00m                                           

> Check the LV status
[root@localhost ~]# lvdisplay /dev/vgcolor/red
  --- Logical volume ---
  LV Path                /dev/vgcolor/red
  LV Name                red
  VG Name                vgcolor
  LV UUID                zUlKwe-fLDP-oZrd-LrlW-NfR7-2l6Z-vAaaX5
  LV Write Access        read/write
  LV Creation host, time localhost.localdomain, 2023-04-17 11:48:06 +0900
  LV Status              NOT available   -----this check!!! still inactive
  LV Size                640.00 MiB
  Current LE             40
  Segments               3
  Allocation             inherit
  Read ahead sectors     auto

> Activate the LVs in the vgcolor VG
[root@localhost ~]# vgchange -a y vgcolor
  3 logical volume(s) in volume group "vgcolor" now active
  
[root@localhost ~]# lvs
  LV    VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root  cl      -wi-ao----  17.00g                                                    
  swap  cl      -wi-ao----   2.00g                                                    
  blue  vgcolor -wi-a----- 480.00m                                                    
  lvol0 vgcolor -wi-a----- 304.00m                                                    
  red   vgcolor -wi-a----- 640.00m                                                    
[root@localhost ~]# lvdisplay /dev/vgcolor/red
  --- Logical volume ---
  LV Path                /dev/vgcolor/red
  LV Name                red
  VG Name                vgcolor
  LV UUID                zUlKwe-fLDP-oZrd-LrlW-NfR7-2l6Z-vAaaX5
  LV Write Access        read/write
  LV Creation host, time localhost.localdomain, 2023-04-17 11:48:06 +0900
  LV Status              available		-----this check!!! now active
  # open                 0
  LV Size                640.00 MiB
  Current LE             40
  Segments               3
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:2
   

Deleting Logical Volumes

> Remove an LV
[root@localhost ~]# lvremove /dev/vgcolor/lvol0
Do you really want to remove active logical volume vgcolor/lvol0? [y/n]: y
  Logical volume "lvol0" successfully removed
  
[root@localhost ~]# lvs
  LV   VG      Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root cl      -wi-ao----  17.00g                                                    
  swap cl      -wi-ao----   2.00g                                                    
  blue vgcolor -wi-a----- 480.00m                                                    
  red  vgcolor -wi-a----- 640.00m    

> Remove the VG
[root@localhost ~]# vgremove vgcolor
Do you really want to remove volume group "vgcolor" containing 2 logical volumes? [y/n]: y
Do you really want to remove active logical volume vgcolor/red? [y/n]: y
  Logical volume "red" successfully removed
Do you really want to remove active logical volume vgcolor/blue? [y/n]: y
  Logical volume "blue" successfully removed
  Volume group "vgcolor" successfully removed

[root@localhost ~]# vgs
  VG #PV #LV #SN Attr   VSize  VFree
  cl   1   2   0 wz--n- 19.00g    0 

> The Physical Volumes still remain
[root@localhost ~]# pvs
  PV         VG Fmt  Attr PSize  PFree
  /dev/sda      lvm2 ---   1.00g 1.00g
  /dev/sdb      lvm2 ---   1.00g 1.00g
  /dev/sdc      lvm2 ---   1.00g 1.00g
  /dev/vda2  cl lvm2 a--  19.00g    0 

> Remove the /dev/sdc PV
[root@localhost ~]# pvremove /dev/sdc
  Labels on physical volume "/dev/sdc" successfully wiped.

> Check with pvs
[root@localhost ~]# pvs
  PV         VG Fmt  Attr PSize  PFree
  /dev/sda      lvm2 ---   1.00g 1.00g
  /dev/sdb      lvm2 ---   1.00g 1.00g
  /dev/vda2  cl lvm2 a--  19.00g    0 

Advanced LVM : Automated Storage Tiering

  • Spec example

    • HDD (2TB hard disk)
    • SSD (512G)
    • SSHD (2TB) : a hybrid combining an SSD with a hard disk
      • Where the OS is installed : SSD
      • Where the actual data is stored : HDD
      • This is exactly what LVM tiering does.
  • Storage Tiering

    • SSD: fast at both reads and writes, so it is used as a cache
    • HDD: stores the actual data
      => Both are made into PVs (physical volumes)
      => The HDDs are grouped under one tag, the SSDs under another
      => These PVs are gathered together into a VG (volume group)
      => The PVs carrying the data tag are collected into LV1 (logical volume)
      => The PVs carrying the cache tag are collected into LV2 (logical volume)
      => Then LV1 and LV2 are bound together (LV1+LV2) so they behave like a single LV (logical volume)
      => This is what storage tiering means.
    • options (the cache mode; a sketch of selecting it follows this list)
      • writethrough: better read performance, ordinary write performance
        • guarantees safety
      • writeback: reads and writes both go through the SSD, so both get faster
        • safety concerns (data written only to the cache can be lost)
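
A hedged sketch of how the cache mode would be chosen when binding the LVs (the transcript below relies on the default, writethrough):

[root@localhost ~]# lvconvert --type cache --cachemode writeback --cachepool vgcloud/lvcache vgcloud/lvdata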
> pvs with the tags column (-o+tags)
[root@localhost ~]# pvs -o+tags
  PV         VG Fmt  Attr PSize  PFree PV Tags
  /dev/sda      lvm2 ---   1.00g 1.00g        
  /dev/sdb      lvm2 ---   1.00g 1.00g        
  /dev/vda2  cl lvm2 a--  19.00g    0  

[root@localhost ~]# df -h
Filesystem           Size  Used Avail Use% Mounted on
/dev/mapper/cl-root   17G  3.9G   14G  23% /
devtmpfs             2.0G     0  2.0G   0% /dev
tmpfs                2.0G  144K  2.0G   1% /dev/shm
tmpfs                2.0G  8.9M  2.0G   1% /run
tmpfs                2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1           1014M  157M  858M  16% /boot
tmpfs                396M  4.0K  396M   1% /run/user/42
tmpfs                396M   12K  396M   1% /run/user/0

[root@localhost ~]# pvcreate /dev/sdc
  Physical volume "/dev/sdc" successfully created.
  
[root@localhost ~]# pvs
  PV         VG Fmt  Attr PSize  PFree
  /dev/sda      lvm2 ---   1.00g 1.00g
  /dev/sdb      lvm2 ---   1.00g 1.00g
  /dev/sdc      lvm2 ---   1.00g 1.00g
  /dev/vda2  cl lvm2 a--  19.00g    0 

[root@localhost ~]# vgs
  VG #PV #LV #SN Attr   VSize  VFree
  cl   1   2   0 wz--n- 19.00g    0 
  
> Create the VG
[root@localhost ~]# vgcreate vgcloud /dev/sda /dev/sdb /dev/sdc
  Volume group "vgcloud" successfully created


> Tags can only be assigned once the VG (volume group) exists.
> Create a tag
[root@localhost ~]# pvchange --addtag slowhdd /dev/sda /dev/sdb
  Physical volume "/dev/sda" changed
  Physical volume "/dev/sdb" changed
  2 physical volumes changed / 0 physical volumes not changed
  
> Create a tag
[root@localhost ~]# pvchange --addtag sdd /dev/sdc
  Physical volume "/dev/sdc" changed
  1 physical volume changed / 0 physical volumes not changed

> Check the tags
[root@localhost ~]# pvs -o+tags
  PV         VG      Fmt  Attr PSize    PFree    PV Tags
  /dev/sda   vgcloud lvm2 a--  1020.00m 1020.00m slowhdd
  /dev/sdb   vgcloud lvm2 a--  1020.00m 1020.00m slowhdd
  /dev/sdc   vgcloud lvm2 a--  1020.00m 1020.00m sdd    
  /dev/vda2  cl      lvm2 a--    19.00g       0         
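
> Tags can also be removed again; a sketch (not run in this session):
pvchange --deltag sdd /dev/sdc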

> From the vgcloud Volume Group (VG), gather only the PVs carrying the slowhdd tag (@slowhdd) into a Logical Volume (LV) named lvdata
[root@localhost ~]# lvcreate -l 100%FREE -n lvdata vgcloud @slowhdd
WARNING: xfs signature detected on /dev/vgcloud/lvdata at offset 0. Wipe it? [y/n]: y
  Wiping xfs signature on /dev/vgcloud/lvdata.
  Logical volume "lvdata" created.


> From the vgcloud Volume Group (VG), gather only the PVs carrying the sdd tag into a Logical Volume (LV) named lvcache
> Add the type option declaring it a cache pool (--type cache-pool)
[root@localhost ~]# lvcreate --type cache-pool -l 100%FREE -n lvcache vgcloud @sdd
  Using default stripesize 64.00 KiB.
  Logical volume "lvcache" created.
  
> Check the LVs
[root@localhost ~]# lvs
  LV      VG      Attr       LSize    Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root    cl      -wi-ao----   17.00g                                                    
  swap    cl      -wi-ao----    2.00g                                                    
  lvcache vgcloud Cwi---C--- 1004.00m                                                    
  lvdata  vgcloud -wi-a-----    1.99g  


> Use lvcache as lvdata's cache pool -> the two LVs, lvcache and lvdata, are combined into one.
[root@localhost ~]# lvconvert --type cache --cachepool vgcloud/lvcache vgcloud/lvdata
Do you want wipe existing metadata of cache pool volume vgcloud/lvcache? [y/n]: y
  Logical volume vgcloud/lvdata is now cached.

> Check the LVs (now combined)
[root@localhost ~]# lvs
  LV     VG      Attr       LSize  Pool      Origin         Data%  Meta%  Move Log Cpy%Sync Convert
  root   cl      -wi-ao---- 17.00g                                                                 
  swap   cl      -wi-ao----  2.00g                                                                 
  lvdata vgcloud Cwi-a-C---  1.99g [lvcache] [lvdata_corig] 0.00   1.86            0.00       

> Put a filesystem on it
[root@localhost ~]# df -h
Filesystem           Size  Used Avail Use% Mounted on
/dev/mapper/cl-root   17G  3.9G   14G  23% /
devtmpfs             2.0G     0  2.0G   0% /dev
tmpfs                2.0G  144K  2.0G   1% /dev/shm
tmpfs                2.0G  8.9M  2.0G   1% /run
tmpfs                2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1           1014M  157M  858M  16% /boot
tmpfs                396M  4.0K  396M   1% /run/user/42
tmpfs                396M   12K  396M   1% /run/user/0

[root@localhost ~]# mkfs.xfs /dev/vgcloud/lvdata
meta-data=/dev/vgcloud/lvdata    isize=512    agcount=8, agsize=65264 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=522112, imaxpct=25
         =                       sunit=16     swidth=16 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0

> Mount it.
[root@localhost ~]# mkdir /data
[root@localhost ~]# mount /dev/vgcloud/lvdata /data

> Check
[root@localhost ~]# df -h
Filesystem                  Size  Used Avail Use% Mounted on
/dev/mapper/cl-root          17G  3.9G   14G  23% /
devtmpfs                    2.0G     0  2.0G   0% /dev
tmpfs                       2.0G  144K  2.0G   1% /dev/shm
tmpfs                       2.0G  8.9M  2.0G   1% /run
tmpfs                       2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1                  1014M  157M  858M  16% /boot
tmpfs                       396M  4.0K  396M   1% /run/user/42
tmpfs                       396M   12K  396M   1% /run/user/0
/dev/mapper/vgcloud-lvdata  2.0G   33M  2.0G   2% /data
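
> To keep this mount across reboots, an /etc/fstab entry along these lines
> would be added (a sketch, not part of the session):
/dev/mapper/vgcloud-lvdata  /data  xfs  defaults  0 0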



> lvs again after mounting
[root@localhost ~]# lvs
  LV     VG      Attr       LSize  Pool      Origin         Data%  Meta%  Move Log Cpy%Sync Convert
  root   cl      -wi-ao---- 17.00g                                                                 
  swap   cl      -wi-ao----  2.00g                                                                 
  lvdata vgcloud Cwi-aoC---  1.99g [lvcache] [lvdata_corig] 0.97   1.86            0.00         
  
  
  
> The -o option of lvs lets you choose which columns to print
> Print the following columns of lvs info for vgcloud/lvdata
[root@localhost ~]# lvs -o cachemode,cache_read_hits,cache_total_blocks vgcloud/lvdata
  CacheMode       CacheReadHits    CacheTotalBlocks
  writethrough              336            16064
  
[root@localhost ~]# cp /usr/bin/* /data

> Since the cache is used before data is written to the HDD, CacheReadHits increases.
[root@localhost ~]# lvs -o cachemode,cache_read_hits,cache_total_blocks vgcloud/lvdata
  CacheMode       CacheReadHits    CacheTotalBlocks
  writethrough              343            16064

=> Because this is writethrough (the default), read performance improves (write performance stays the same).
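
> The cache mode of an existing cached LV can also be switched afterwards; a
> sketch using the same names (lvchange --cachemode is a standard LVM option):
lvchange --cachemode writeback vgcloud/lvdata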

Advanced LVM : RAID Volumes

  • When RAID is not implemented in hardware, it can be implemented in software (here, via LVM)

striping

  • Splits the work across multiple disks.
  • RAID 0 (see the sketch below)
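
> A striped (RAID 0 style) LV is created by giving a stripe count; a minimal
> sketch assuming a VG with at least two PVs, like the vgtest used below:
lvcreate -L 100M -i 2 -n lvstripe vgtest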

mirroring

  • Disk duplication -> even if one disk fails, the other keeps working
  • Requires at least two disks.
  • Can also improve read performance.
  • RAID 1

RAID

  • RAID 0: striping
  • RAID 1: mirroring
  • RAID 3: stores parity values
    • When the data 10 comes in, a parity value is stored, so a failed disk's value (?) can be deduced with arithmetic like 5 + ? + 2 = 10 (see the sketch after this list).
    • The parity disk does far more reading and writing than the other disks, so it is more likely to fail.
  • RAID 5: so instead of keeping parity on a single disk, distribute it across the disks (i.e., the parity values are stored spread out)
    • (In the original diagram, the shaded cells are where parity is stored)
  • RAID 6: double parity (two parity values are stored)
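
> The 5 + ? + 2 = 10 sum above is only an analogy; real RAID parity uses XOR,
> but recovery works the same way: XOR the surviving values with the parity.
> A tiny shell illustration with assumed values:
d1=5; d2=2
parity=$(( d1 ^ d2 ))     # stored on the parity disk (5 XOR 2 = 7)
echo $(( d1 ^ parity ))   # the disk holding d2 failed: 5 XOR 7 recovers 2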

LVM + mirroring

[root@localhost ~]# df -h
Filesystem                  Size  Used Avail Use% Mounted on
/dev/mapper/cl-root          17G  3.9G   14G  23% /
devtmpfs                    2.0G     0  2.0G   0% /dev
tmpfs                       2.0G  144K  2.0G   1% /dev/shm
tmpfs                       2.0G  8.9M  2.0G   1% /run
tmpfs                       2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1                  1014M  157M  858M  16% /boot
tmpfs                       396M  4.0K  396M   1% /run/user/42
tmpfs                       396M   12K  396M   1% /run/user/0
/dev/mapper/vgcloud-lvdata  2.0G  190M  1.8G  10% /data

[root@localhost ~]# umount /data

[root@localhost ~]# vgremove vgcloud
Do you really want to remove volume group "vgcloud" containing 1 logical volumes? [y/n]: y
Do you really want to remove active logical volume vgcloud/lvdata? [y/n]: y
  Logical volume "lvcache" successfully removed
  Logical volume "lvdata" successfully removed
  Volume group "vgcloud" successfully removed

[root@localhost ~]# pvs
  PV         VG Fmt  Attr PSize  PFree
  /dev/sda      lvm2 ---   1.00g 1.00g
  /dev/sdb      lvm2 ---   1.00g 1.00g
  /dev/sdc      lvm2 ---   1.00g 1.00g
  /dev/vda2  cl lvm2 a--  19.00g    0 

> VG vgtest 생성
[root@localhost ~]# vgcreate vgtest /dev/sda /dev/sdb /dev/sdc
  Volume group "vgtest" successfully created

[root@localhost ~]# vgs
  VG     #PV #LV #SN Attr   VSize  VFree
  cl       1   2   0 wz--n- 19.00g    0 
  vgtest   3   0   0 wz--n-  2.99g 2.99g

> -m 1: one mirror (one extra copy)
[root@localhost ~]# lvcreate -L 100M -m 1 -n mirror vgtest
  Logical volume "mirror" created.

> The Cpy%Sync column shows how far the two copies are synchronized (whether they hold the same data)
[root@localhost ~]# lvs
  LV     VG     Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root   cl     -wi-ao----  17.00g                                                    
  swap   cl     -wi-ao----   2.00g                                                    
  mirror vgtest rwi-a-r--- 100.00m                                    92.00    
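
> To see the underlying rimage/rmeta sub-LVs and the PVs they sit on, internal
> LVs can be listed as well; a sketch:
lvs -a -o +devices vgtest
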
> Show the mapping information
[root@localhost ~]# lvdisplay -m /dev/vgtest/mirror
  --- Logical volume ---
  LV Path                /dev/vgtest/mirror
  LV Name                mirror
  VG Name                vgtest
  LV UUID                k0qAXT-5Mp9-tJ4Q-kjaO-FTnT-A1Yi-giT6PW
  LV Write Access        read/write
  LV Creation host, time localhost.localdomain, 2023-04-17 15:58:45 +0900
  LV Status              available
  # open                 0
  LV Size                100.00 MiB
  Current LE             25
  Mirrored volumes       2
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:6
   
  --- Segments ---
  Logical extents 0 to 24:
    Type		raid1
    Monitoring		monitored
    Raid Data LV 0
      Logical volume	mirror_rimage_0
      Logical extents	0 to 24
    Raid Data LV 1
      Logical volume	mirror_rimage_1
      Logical extents	0 to 24
    Raid Metadata LV 0	mirror_rmeta_0
    Raid Metadata LV 1	mirror_rmeta_1


[root@localhost ~]# vgdisplay -v vgtest
  --- Volume group ---
  VG Name               vgtest
  System ID             
  Format                lvm2
  Metadata Areas        3
  Metadata Sequence No  3
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                1
  Open LV               0
  Max PV                0
  Cur PV                3
  Act PV                3
  VG Size               2.99 GiB
  PE Size               4.00 MiB
  Total PE              765
  Alloc PE / Size       52 / 208.00 MiB
  Free  PE / Size       713 / 2.79 GiB
  VG UUID               g9L7mJ-Jf2i-o1W0-3k07-kTnK-b0Ld-Lt9xB1
   
  --- Logical volume ---
  LV Path                /dev/vgtest/mirror
  LV Name                mirror
  VG Name                vgtest
  LV UUID                k0qAXT-5Mp9-tJ4Q-kjaO-FTnT-A1Yi-giT6PW
  LV Write Access        read/write
  LV Creation host, time localhost.localdomain, 2023-04-17 15:58:45 +0900
  LV Status              available
  # open                 0
  LV Size                100.00 MiB
  Current LE             25
  Mirrored volumes       2
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:6
   
  --- Physical volumes ---
  PV Name               /dev/sda     
  PV UUID               6vSBrl-LUJi-TyWx-jCH2-T0ND-DqHT-9sRLpq
  PV Status             allocatable
  Total PE / Free PE    255 / 229
   
  PV Name               /dev/sdb     
  PV UUID               52XEkN-oWZY-anS8-W323-oPEd-0rnn-v0IX0O
  PV Status             allocatable
  Total PE / Free PE    255 / 229
   
  PV Name               /dev/sdc     
  PV UUID               adfL89-v8PF-iNyr-1lLD-1hQA-rf8e-rCZa5g
  PV Status             allocatable
  Total PE / Free PE    255 / 255
   
   
[root@localhost ~]# lvremove /dev/vgtest/mirror
Do you really want to remove active logical volume vgtest/mirror? [y/n]: y
  Logical volume "mirror" successfully removed

> The original plus one mirror of it: RAID 1, in the VG named vgtest
[root@localhost ~]# lvcreate -L 400M --type raid1 -m 1 -n lvmirror vgtest
  Logical volume "lvmirror" created.
[root@localhost ~]# vgdisplay -v vgtest
  --- Volume group ---
  VG Name               vgtest
  System ID             
  Format                lvm2
  Metadata Areas        3
  Metadata Sequence No  6
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                1
  Open LV               0
  Max PV                0
  Cur PV                3
  Act PV                3
  VG Size               2.99 GiB
  PE Size               4.00 MiB
  Total PE              765
  Alloc PE / Size       202 / 808.00 MiB
  Free  PE / Size       563 / 2.20 GiB
  VG UUID               g9L7mJ-Jf2i-o1W0-3k07-kTnK-b0Ld-Lt9xB1
   
  --- Logical volume ---
  LV Path                /dev/vgtest/lvmirror
  LV Name                lvmirror
  VG Name                vgtest
  LV UUID                n2A6Av-0It6-ExXb-6Ya6-home-z6gV-e0cfi1
  LV Write Access        read/write
  LV Creation host, time localhost.localdomain, 2023-04-17 16:06:10 +0900
  LV Status              available
  # open                 0
  LV Size                400.00 MiB
  Current LE             100
  Mirrored volumes       2
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:6
   
  --- Physical volumes ---
  PV Name               /dev/sda     
  PV UUID               6vSBrl-LUJi-TyWx-jCH2-T0ND-DqHT-9sRLpq
  PV Status             allocatable
  Total PE / Free PE    255 / 154       ----- check this!! mirrored
   
  PV Name               /dev/sdb     
  PV UUID               52XEkN-oWZY-anS8-W323-oPEd-0rnn-v0IX0O
  PV Status             allocatable
  Total PE / Free PE    255 / 154		----- check this!! mirrored
   
  PV Name               /dev/sdc     
  PV UUID               adfL89-v8PF-iNyr-1lLD-1hQA-rf8e-rCZa5g
  PV Status             allocatable
  Total PE / Free PE    255 / 255      	----- check this!! not mirrored
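
> (The transcript skips a step here: the raid5 create below succeeds, and the
> vgdisplay that follows shows four PVs, so a fourth disk must have been added
> in between; roughly, though these exact commands are not in the original log:)
pvcreate /dev/sdd
vgextend vgtest /dev/sdd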
   
> lvcreate
> Options => VG name: vgtest, --type (raid type): raid5, -i (number of stripes; raid5 uses one additional disk for parity, so 4 disks in total): 3, -n (name): lvraid5
[root@localhost ~]# lvcreate -L 100M --type raid5 -i 3 -n lvraid5 vgtest
  Using default stripesize 64.00 KiB.
  Rounding size 100.00 MiB (25 extents) up to stripe boundary size 108.00 MiB (27 extents).
  Logical volume "lvraid5" created.

> Check (10 PEs are in use on each of the four disks: 27 data LEs plus parity spread over 4 disks is 9 PEs per disk, plus 1 metadata PE each; 4 x 10 = 40 PEs, matching Alloc PE)
[root@localhost ~]# vgdisplay -v vgtest
  --- Volume group ---
  VG Name               vgtest
  System ID             
  Format                lvm2
  Metadata Areas        4
  Metadata Sequence No  10
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                1
  Open LV               0
  Max PV                0
  Cur PV                4
  Act PV                4
  VG Size               3.98 GiB
  PE Size               4.00 MiB
  Total PE              1020
  Alloc PE / Size       40 / 160.00 MiB
  Free  PE / Size       980 / 3.83 GiB
  VG UUID               g9L7mJ-Jf2i-o1W0-3k07-kTnK-b0Ld-Lt9xB1
   
  --- Logical volume ---
  LV Path                /dev/vgtest/lvraid5
  LV Name                lvraid5
  VG Name                vgtest
  LV UUID                wuOc77-d2Ma-Atxh-uRml-3qDw-RA7b-wettbB
  LV Write Access        read/write
  LV Creation host, time localhost.localdomain, 2023-04-17 16:09:32 +0900
  LV Status              available
  # open                 0
  LV Size                108.00 MiB
  Current LE             27
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     8192
  Block device           253:15
   
  --- Physical volumes ---
  PV Name               /dev/sda     
  PV UUID               6vSBrl-LUJi-TyWx-jCH2-T0ND-DqHT-9sRLpq
  PV Status             allocatable
  Total PE / Free PE    255 / 245		------- 10 PEs in use on each disk
   
  PV Name               /dev/sdb     
  PV UUID               52XEkN-oWZY-anS8-W323-oPEd-0rnn-v0IX0O
  PV Status             allocatable
  Total PE / Free PE    255 / 245		------- 10 PEs in use on each disk
   
  PV Name               /dev/sdc     
  PV UUID               adfL89-v8PF-iNyr-1lLD-1hQA-rf8e-rCZa5g
  PV Status             allocatable
  Total PE / Free PE    255 / 245		------- 10 PEs in use on each disk
   
  PV Name               /dev/sdd     
  PV UUID               L48xDo-TZ93-qOQK-QBtt-RDSC-RGQN-kosJ5E
  PV Status             allocatable
  Total PE / Free PE    255 / 245		------- 10 PEs in use on each disk
   

