Share a disk device via iSCSI

iSCSI is a SCSI command-set transport developed by IBM that runs on top of the IP protocol, allowing SCSI commands to be
carried over IP networks and routed across high-speed Gigabit Ethernet. iSCSI is a storage technology that combines the
existing SCSI interface with Ethernet, so that servers can exchange data with storage devices over an IP network.
Server:
fdisk /dev/vdb   #Create a partition /dev/vdb2, 1G in size
partprobe 
yum install targetcli.noarch 
systemctl start target
targetcli 
/> /backstores/block create westos:storage1 /dev/vdb2  ##Bind the block device: /dev/vdb2 is exported under the name westos:storage1
Created block storage object westos:storage1 using /dev/vdb2.
/> /iscsi create iqn.2017-12.com.example:storage1      ##Each initiator and target need to be identified by a unique name
Created target iqn.2017-12.com.example:storage1.
Created TPG 1.
/> /iscsi/iqn.2017-12.com.example:storage1/tpg1/acls  create iqn.2017-12.com.example:westoskey   ##Create an ACL: only an initiator using this name may connect
Created Node ACL for iqn.2017-12.com.example:westoskey  
/> /iscsi/iqn.2017-12.com.example:storage1/tpg1/luns  create /backstores/block/westos:storage1  ##Create a LUN (logical unit number) that exports the backstore
Created LUN 0.
Created LUN 0->0 mapping in node ACL iqn.2017-12.com.example:westoskey
/> /iscsi/iqn.2017-12.com.example:storage1/tpg1/portals create 172.25.254.11    ##Establish gateway connection, i.e. target portal
Using default IP port 3260
Created network portal 172.25.254.11:3260.
/> exit

firewall-cmd --permanent --add-port=3260/tcp    ##Add shared port of shared disk to firewall policy
firewall-cmd --reload

client:
yum search iscsi   ##Looking for iscsi services
yum install iscsi-initiator-utils.x86_64 -y
systemctl start iscsi
[root@client ~]# vim /etc/iscsi/initiatorname.iscsi 

InitiatorName=iqn.2017-12.com.example:westoskey

systemctl restart iscsi
[root@client ~]# iscsiadm -m discovery -t st -p 172.25.254.11  ##devices detected  
172.25.254.11:3260,1 iqn.2017-12.com.example:storage1
[root@client ~]# iscsiadm -m node -T iqn.2017-12.com.example:storage1 -p 172.25.254.11 -l   ##log in to the target
Logging in to [iface: default, target: iqn.2017-12.com.example:storage1, portal: 172.25.254.11,3260] (multiple)
Login to [iface: default, target: iqn.2017-12.com.example:storage1, portal: 172.25.254.11,3260] successful.

fdisk -l   ##Viewing devices
  Disk /dev/sda   ##One more device found
fdisk /dev/sda    ##Create a partition using all the available space
mkfs.xfs /dev/sda1
mount /dev/sda1 /mnt/
cd /mnt
[root@server ~]# mount /dev/vdb2 /mnt
mount: /dev/vdb2 is already mounted or /mnt busy     ##Since the client already has the device in use, the server cannot mount the same device at the same time
yum install tree    
tree /var/lib/iscsi/
//Configure mounting at boot:
[root@client ~]# blkid
/dev/vda1: UUID="9bf6b9f7-92ad-441b-848e-0257cbb883d1" TYPE="xfs" 
/dev/vdb1: UUID="SIaPf4-OdHu-OzAW-NlQG-vZ3D-X8ZO-1FK3Ih" TYPE="LVM2_member" 
/dev/mapper/vg0-vo: UUID="12294be2-bdad-4817-b162-038e22313d9f" TYPE="ext4" 
/dev/sda1: UUID="926567a2-281d-4144-98ac-9fe50b4224b0" TYPE="xfs" 
[root@client ~]# vim /etc/fstab
 UUID="926567a2-281d-4144-98ac-9fe50b4224b0"     /mnt  xfs   defaults,_netdev  0 0    ##Without the _netdev option the mount is attempted before the network is up, the boot fails, and you must enter emergency mode to comment out this /etc/fstab entry before restarting
[root@client ~]# mount -a 
[root@client ~]# reboot      
[root@foundation9 ~]# rht-vmctl poweroff desktop    ##The first time, the VM must be fully powered off and started again
Powering off desktop..
[root@foundation9 ~]# rht-vmctl start desktop
Starting desktop.


Change fstab file in emergency mode

Client delete:
umount /mnt   ##Unmount first
[root@client ~]# vim /etc/fstab    ##Remove the fstab entry that was added for mounting at boot
tree /var/lib/iscsi    ##see
iscsiadm -m node -T iqn.2017-12.com.example:storage1 -p 172.25.254.11 -u   ##log out of the target
iscsiadm -m node -T iqn.2017-12.com.example:storage1 -p 172.25.254.11 -o delete   ##delete the node record
tree /var/lib/iscsi
fdisk -l
systemctl restart iscsi
[root@client ~]# fdisk -l    ##sda will not exist after reboot

//Server delete:
targetcli
/> clearconfig confirm=true

Extension:
1.
//Server:
//First create an lvm partition and delete the original one
fdisk /dev/vdb
/dev/vdb2            2048     2099199     1048576   8e  Linux LVM
partprobe 
pvcreate /dev/vdb2
vgcreate iscsi_vg /dev/vdb2
vgdisplay   ##Find parameters for remaining space
lvcreate -l 255 -n iscsi_lv iscsi_vg

targetcli    ##Establishing a shared lvm disk on the server
/> /backstores/block create westos:storage1 /dev/iscsi_vg/iscsi_lv
Created block storage object westos:storage1 using /dev/iscsi_vg/iscsi_lv.
/> /iscsi create iqn.2017-12.com.example:storage1
Created target iqn.2017-12.com.example:storage1.
Created TPG 1.
/> iscsi/iqn.2017-12.com.example:storage1/tpg1/acls create iqn.2017-12.com.example:key1
Created Node ACL for iqn.2017-12.com.example:key1
/> /iscsi/iqn.2017-12.com.example:storage1/tpg1/luns create /backstores/block/westos:storage1
Created LUN 0.
Created LUN 0->0 mapping in node ACL iqn.2017-12.com.example:key1
/> iscsi/iqn.2017-12.com.example:storage1/tpg1/portals create 172.25.254.11
Using default IP port 3260
Created network portal 172.25.254.11:3260.
/> exit

//client:
[root@client ~]# tree /var/lib/iscsi   
/var/lib/iscsi
|-- ifaces
|-- isns
|-- nodes
|-- send_targets
|-- slp
`-- static

6 directories, 0 files   ##Need to delete the original

[root@client ~]# vim /etc/iscsi/initiatorname.iscsi   ##Set the initiator name to match the ACL created on the server
 InitiatorName=iqn.2017-12.com.example:key1
systemctl restart iscsid.service 
systemctl restart iscsi
[root@client ~]# iscsiadm -m discovery -t st -p 172.25.254.11   ##Discover targets: -m discovery selects discovery mode, -t st uses the sendtargets protocol, -p gives the target IP (port defaults to 3260 when omitted)
172.25.254.11:3260,1 iqn.2017-12.com.example:storage1
[root@client ~]# iscsiadm -m node -T iqn.2017-12.com.example:storage1 -p 172.25.254.11 -l   ##Login service
Logging in to [iface: default, target: iqn.2017-12.com.example:storage1, portal: 172.25.254.11,3260] (multiple)
Login to [iface: default, target: iqn.2017-12.com.example:storage1, portal: 172.25.254.11,3260] successful.

2.
//Server extension
fdisk /dev/vdb   
/dev/vdb2           2048     2099199     1048576   8e  Linux LVM
/dev/vdb3        2099200     4196351     1048576   8e  Linux LVM
partprobe     ##Add an lvm partition to vdb3
[root@server ~]# pvcreate /dev/vdb3
  Physical volume "/dev/vdb3" successfully created
[root@server ~]# vgextend iscsi_vg /dev/vdb3
  Volume group "iscsi_vg" successfully extended
[root@server ~]# lvextend -L 1500M /dev/iscsi_vg/iscsi_lv
  Extending logical volume iscsi_lv to 1.46 GiB
  Logical volume iscsi_lv successfully resized
[root@server ~]# lvs  ##see
  LV        VG       Attr       LSize Pool Origin Data%  Move Log Cpy%Sync Convert
  iscsi_lv iscsi_vg -wi-ao---- 1.46g

//Client synchronization               
[root@client ~]# iscsiadm -m node -T iqn.2017-12.com.example:storage1 -p 172.25.254.11 -u
Logging out of session [sid: 2, target: iqn.2017-12.com.example:storage1, portal: 172.25.254.11,3260]
Logout of [sid: 2, target: iqn.2017-12.com.example:storage1, portal: 172.25.254.11,3260] successful.
[root@client ~]# iscsiadm -m node -T iqn.2017-12.com.example:storage1 -p 172.25.254.11 -l   ##The client must log out and log back in for the enlarged LUN size to be visible
Logging in to [iface: default, target: iqn.2017-12.com.example:storage1, portal: 172.25.254.11,3260] (multiple)
Login to [iface: default, target: iqn.2017-12.com.example:storage1, portal: 172.25.254.11,3260] successful.
[root@client ~]# fdisk -l
Disk /dev/sda: 1572 MB

Tags: network yum vim firewall

Posted on Sun, 31 May 2020 11:54:03 -0400 by scottybwoy