Using the tc command to limit a network card's upload and download traffic on a Linux server

1, Install wondershaper software

Since this package is only available from the third-party EPEL repository on CentOS 7, install that repository first
yum install epel-release -y
yum install wondershaper -y
Limit the uplink bandwidth to 1 Mbit/s (1024 Kb/s) and the downlink bandwidth to 10 Mbit/s (10240 Kb/s)
wondershaper eth0 1024 10240
Clear the existing rules (the script reads the interface name from the second argument, so the first one is just a placeholder)
wondershaper clean eth0
View the existing rules (here, the rules on eth0)
wondershaper eth0
Parameter details

There are normally three parameters: the first is the network card name, the second is the upload (outbound) speed limit, and the third is the download (inbound) speed limit, both in Kb/s.
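
For example, to cap the upload at 2 Mbit/s and the download at 8 Mbit/s on eth0 (a sketch only; substitute your own interface name, and remember the values are in Kb/s, so 2*1024 and 8*1024):
wondershaper eth0 2048 8192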

2, If you do not want to install the package, or cannot download it, use the following steps instead; the code below is the same script that ships with wondershaper

1. Create a file and add executable permissions
touch wondershaper && chmod +x wondershaper
2. Copy the code below into the file you just created and move it to the /usr/sbin directory
vi wondershaper
mv wondershaper /usr/sbin
#!/bin/bash 
function show_help {
  echo "Usage: $0 [device] clean|[upload speed in Kb/s] [download speed in Kb/s]"
  echo "Example: $0 eth0 20 500"
  exit
}

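# no arguments: print usage and exit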
if [ $# -eq 0 ]; then
    show_help;
fi

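# one argument (the device): show its current qdisc and class statistics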
if [ $# -eq 1 ]; then
  tc -s qdisc ls dev $1
  tc -s class ls dev $1
  exit
fi

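# two arguments: clear existing rules; note the device name is taken from the second argument (e.g. "wondershaper clean eth0")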
if [ $# -eq 2 ]; then
  tc qdisc del dev $2 root    2> /dev/null > /dev/null
  tc qdisc del dev $2 ingress 2> /dev/null > /dev/null
  echo Wondershaper queues have been cleared.
  exit
fi

if [ $# -ne 3 ]; then
    show_help;
fi

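# three arguments: device, upload limit and download limit, both in Kb/s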
DOWNLINK=$3
UPLINK=$2
DEV=$1

# low priority source netmasks
NOPRIOHOSTSRC=

# low priority destination netmasks
NOPRIOHOSTDST=

# low priority source ports
NOPRIOPORTSRC=

# low priority destination ports
NOPRIOPORTDST=

# clean existing down- and uplink qdiscs, hide errors
tc qdisc del dev $DEV root    2> /dev/null > /dev/null
tc qdisc del dev $DEV ingress 2> /dev/null > /dev/null

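# leftover from the original wondershaper script (which used a "stop" argument); with this usage $1 is the device name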
if [ "$1" = "stop" ] 
then 
    exit
fi

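# uplink: install the root CBQ qdisc (the bandwidth value is CBQ's reference for the physical link speed, not the limit itself)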
tc qdisc add dev $DEV root handle 1: cbq avpkt 1000 bandwidth 10mbit 

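# shape all outgoing traffic to ${UPLINK} Kb/s in a single bounded class - this keeps queues out of the modem/router and latency low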
tc class add dev $DEV parent 1: classid 1:1 cbq rate ${UPLINK}kbit \
allot 1500 prio 5 bounded isolated 

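# high priority (interactive) class 1:10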
tc class add dev $DEV parent 1:1 classid 1:10 cbq rate ${UPLINK}kbit \
   allot 1600 prio 1 avpkt 1000

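# default bulk class 1:20 - slightly lower rate and priority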
tc class add dev $DEV parent 1:1 classid 1:20 cbq rate $((9*UPLINK/10))kbit \
   allot 1600 prio 2 avpkt 1000

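# low priority class 1:30 - for traffic we want to de-prioritize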
tc class add dev $DEV parent 1:1 classid 1:30 cbq rate $((8*UPLINK/10))kbit \
   allot 1600 prio 2 avpkt 1000

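# give each leaf class Stochastic Fairness Queueing so no single flow monopolizes it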
tc qdisc add dev $DEV parent 1:10 handle 10: sfq perturb 10
tc qdisc add dev $DEV parent 1:20 handle 20: sfq perturb 10
tc qdisc add dev $DEV parent 1:30 handle 30: sfq perturb 10

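# packets with TOS Minimum Delay (e.g. interactive ssh) go to the high priority class 1:10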
tc filter add dev $DEV parent 1:0 protocol ip prio 10 u32 \
      match ip tos 0x10 0xff  flowid 1:10

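# ICMP (ip protocol 1) goes to the interactive class so pings stay fast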
tc filter add dev $DEV parent 1:0 protocol ip prio 11 u32 \
        match ip protocol 1 0xff flowid 1:10

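# UDP traffic (protocol 17) with source port 4666 is pushed down to the low priority class 1:30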
tc filter add dev $DEV parent 1:0 protocol ip prio 10 u32 \
   match ip protocol 17 0xff \
   match ip sport 4666 0xffff \
   flowid 1:30 

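# short TCP packets (no IP options, total length < 64 bytes, i.e. mostly pure ACKs) go to 1:10 so downloads stay fast during uploads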
tc filter add dev $DEV parent 1: protocol ip prio 12 u32 \
   match ip protocol 6 0xff \
   match u8 0x05 0x0f at 0 \
   match u16 0x0000 0xffc0 at 2 \
   flowid 1:10

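# any ports or hosts listed in the NOPRIO* variables above are sent to the low priority class 1:30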
for a in $NOPRIOPORTDST
do
    tc filter add dev $DEV parent 1: protocol ip prio 14 u32 \
       match ip dport $a 0xffff flowid 1:30
done

for a in $NOPRIOPORTSRC
do
    tc filter add dev $DEV parent 1: protocol ip prio 15 u32 \
       match ip sport $a 0xffff flowid 1:30
done

for a in $NOPRIOHOSTSRC
do
    tc filter add dev $DEV parent 1: protocol ip prio 16 u32 \
       match ip src $a flowid 1:30
done

for a in $NOPRIOHOSTDST
do
    tc filter add dev $DEV parent 1: protocol ip prio 17 u32 \
       match ip dst $a flowid 1:30
done

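# everything not matched above is bulk traffic and ends up in 1:20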
tc filter add dev $DEV parent 1: protocol ip prio 18 u32 \
   match ip dst 0.0.0.0/0 flowid 1:20

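# downlink: attach an ingress policer and drop incoming IP traffic that exceeds ${DOWNLINK} Kb/s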
tc qdisc add dev $DEV handle ffff: ingress

tc filter add dev $DEV parent ffff: protocol ip prio 50 u32 match ip src \
   0.0.0.0/0 police rate ${DOWNLINK}kbit burst 10k drop flowid :1
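
For reference, the core of what this script does can be boiled down to a few tc commands. The sketch below is illustrative only: it assumes the interface eth0 and the 1024/10240 Kb/s limits from section 1, and it drops the script's priority classes and filters, keeping just one bounded upload class and the download policer.

# upload: root CBQ qdisc with a single bounded class and a catch-all filter into it
tc qdisc add dev eth0 root handle 1: cbq avpkt 1000 bandwidth 10mbit
tc class add dev eth0 parent 1: classid 1:1 cbq rate 1024kbit allot 1500 prio 5 bounded isolated
tc filter add dev eth0 parent 1: protocol ip prio 16 u32 match ip dst 0.0.0.0/0 flowid 1:1
# download: ingress policer that drops incoming traffic above 10240 Kb/s
tc qdisc add dev eth0 handle ffff: ingress
tc filter add dev eth0 parent ffff: protocol ip prio 50 u32 match ip src 0.0.0.0/0 police rate 10240kbit burst 10k drop flowid :1
# to undo both:
tc qdisc del dev eth0 root
tc qdisc del dev eth0 ingress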
