DPDK course study notes, exercise 7 (VPP environment setup and plugin demo test)

Notes on setting up a VPP plugin environment:
installing VPP and running a plugin demo.

1: VPP test environment installation

1.1: VPP installation

git clone -b stable/1801 https://github.com/FDio/vpp.git
./extras/vagrant/build.sh
make
cd build-root/
dpkg -i vpp-lib_18.01.2-1~g9b554f3_amd64.deb
dpkg -i vpp_18.01.2-1~g9b554f3_amd64.deb
dpkg -i vpp-dev_18.01.2-1~g9b554f3_amd64.deb
dpkg -i vpp-plugins_18.01.2-1~g9b554f3_amd64.deb
#after a successful install, verify:
ls /etc/vpp/startup.conf		#this file should exist
ls /usr/lib/vpp_plugins/		#the plugin shared libraries are installed here
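
A package-level check can also confirm the install (a small sketch, assuming the dpkg-based install above):
dpkg -l | grep vpp		#vpp, vpp-lib, vpp-dev and vpp-plugins should all be listed as installed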

1.2: VPP configuration

#check the NIC information
root@ubuntu:/home/hlp/vpp_1801/vpp/build-root# ifconfig
eth0      Link encap:Ethernet  HWaddr 00:0c:29:20:c7:0d  
          inet addr:192.168.0.109  Bcast:192.168.0.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:fe20:c70d/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:41 errors:0 dropped:0 overruns:0 frame:0
          TX packets:10 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:7663 (7.6 KB)  TX bytes:1332 (1.3 KB)

eth1      Link encap:Ethernet  HWaddr 00:0c:29:20:c7:17  
          inet addr:192.168.0.114  Bcast:192.168.0.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:fe20:c717/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:41 errors:0 dropped:0 overruns:0 frame:0
          TX packets:10 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:7663 (7.6 KB)  TX bytes:1332 (1.3 KB)

eth2      Link encap:Ethernet  HWaddr 00:0c:29:20:c7:21  
          inet addr:192.168.0.111  Bcast:192.168.0.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:fe20:c721/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:48 errors:0 dropped:0 overruns:0 frame:0
          TX packets:18 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:8654 (8.6 KB)  TX bytes:1868 (1.8 KB)

eth3      Link encap:Ethernet  HWaddr 00:0c:29:20:c7:2b  
          inet addr:192.168.105.142  Bcast:192.168.105.255  Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:fe20:c72b/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:440 errors:0 dropped:0 overruns:0 frame:0
          TX packets:319 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:47076 (47.0 KB)  TX bytes:50800 (50.8 KB)

lo        Link encap:Local Loopback  
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:224 errors:0 dropped:0 overruns:0 frame:0
          TX packets:224 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1 
          RX bytes:17664 (17.6 KB)  TX bytes:17664 (17.6 KB)
#check the PCI addresses of the NICs
root@ubuntu:/home/hlp/vpp_1801/vpp/build-root# lspci |grep Eth
02:04.0 Ethernet controller: Intel Corporation 82545EM Gigabit Ethernet Controller (Copper) (rev 01)
03:00.0 Ethernet controller: VMware VMXNET3 Ethernet Controller (rev 01)
0b:00.0 Ethernet controller: VMware VMXNET3 Ethernet Controller (rev 01)
13:00.0 Ethernet controller: VMware VMXNET3 Ethernet Controller (rev 01)

#add the NIC configuration to /etc/vpp/startup.conf
cpu {
        main-core 0
}

unix {
        interactive cli-listen 127.0.0.1:5002
        log /tmp/vpp.log
        full-coredump
}

dpdk {
        uio-driver igb_uio
        dev 0000:03:00.0
        dev 0000:0b:00.0
}
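
The dev entries above are the PCI addresses of the VMXNET3 NICs from the lspci output. If needed, per-device and memory options can be added to the same dpdk stanza; a sketch using standard startup.conf options (the values are assumptions, not what I tested):
dpdk {
        uio-driver igb_uio
        dev 0000:03:00.0 {
                num-rx-queues 2        #per-device option, value assumed
        }
        dev 0000:0b:00.0
        socket-mem 512                 #hugepage memory per NUMA socket, in MB
}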

1.3: Running VPP

#this VPP test run depends on the uio and igb_uio kernel modules; igb_uio comes from DPDK, so I build and insert it from the DPDK tree
export RTE_SDK=/home/hlp/dpdk/dpdk-stable-19.08.2
export RTE_TARGET=x86_64-native-linux-gcc
./usertools/dpdk-setup.sh
#choose option 43 (the igb_uio insert step in this menu; the numbering may differ across DPDK versions)
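
If the menu numbering is different in your DPDK version, the module can also be built and inserted by hand; a rough sketch, assuming the RTE_SDK/RTE_TARGET exports above:
cd $RTE_SDK
make install T=$RTE_TARGET -j4              #builds $RTE_TARGET/kmod/igb_uio.ko (needs kernel headers)
modprobe uio                                #igb_uio depends on uio
insmod $RTE_TARGET/kmod/igb_uio.ko
lsmod | grep igb_uio                        #confirm the module loaded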

#configure hugepages; with 1G pages you have to mount hugetlbfs yourself, which I did not test (a sketch follows after these commands)
modprobe uio
#modprobe igb_uio        #fails on its own; igb_uio is loaded from the DPDK tree as shown above
echo 4 >/sys/devices/system/node/node0/hugepages/hugepages-1048576kB/nr_hugepages 
echo 1024 >/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages 
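
For the untested 1G-page path, the mount would look roughly like this (a sketch; the /mnt/huge-1G mount point is an assumption):
mkdir -p /mnt/huge-1G
mount -t hugetlbfs -o pagesize=1G none /mnt/huge-1G
grep -i huge /proc/meminfo                  #verify the pages were reserved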

#my VM has four NICs; three of them are multi-queue NICs reserved for DPDK/VPP
ifconfig eth0 down
ifconfig eth1 down
ifconfig eth2 down
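
If the devices are not picked up automatically when VPP starts, they can also be bound to igb_uio by hand with dpdk-devbind.py from the DPDK tree (PCI addresses taken from the lspci output above):
cd $RTE_SDK
./usertools/dpdk-devbind.py --status                          #check the current driver bindings
./usertools/dpdk-devbind.py --bind=igb_uio 0000:03:00.0 0000:0b:00.0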

Start VPP and test

#vpp runs as an ordinary process, so it can be left running in the background
vpp -c /etc/vpp/startup.conf
#if startup reports that another vpp instance is already running, shut it down with:
service vpp stop
#enter the VPP CLI with vppctl and configure it
vppctl
vpp# show int
              Name               Idx       State          Counter          Count     
GigabitEthernet13/0/0             3        down      
GigabitEthernet3/0/0              1        down      
GigabitEthernetb/0/0              2        down      
local0                            0        down 
#configure an IP address
vpp# set interface state GigabitEthernet13/0/0 up
vpp# set interface ip address GigabitEthernet13/0/0 192.168.0.119/24
vpp# show int
              Name               Idx       State          Counter          Count     
GigabitEthernet13/0/0             3         up       rx packets                     6
                                                     rx bytes                     469
                                                     drops                          6
                                                     ip4                            4
                                                     ip6                            1
GigabitEthernet3/0/0              1        down      
GigabitEthernetb/0/0              2        down      
local0                            0        down 
#configure a VLAN (bridge domain plus dot1q sub-interface); a peer-side test is sketched after the output below
vpp# create bridge-domain 10
bridge-domain 10
vpp# set interface l2 bridge GigabitEthernet3/0/0 10
vpp# create sub-interfaces GigabitEthernet3/0/0 10 dot1q 10
GigabitEthernet3/0/0.10
vpp# show bridge-domain 10 detail
  BD-ID   Index   BSN  Age(min)  Learning  U-Forwrd  UU-Flood  Flooding  ARP-Term  BVI-Intf
   10       1      0     off        on        on        on        on       off       N/A   

           Interface           If-idx ISN  SHG  BVI  TxFlood        VLAN-Tag-Rewrite       
     GigabitEthernet3/0/0        1     1    0    -      *                 none             
vpp# 
vpp# set interface l2 bridge GigabitEthernet3/0/0.10 10
vpp# set interface l2 tag-rewrite GigabitEthernet3/0/0.10 pop 1
vpp# show bridge-domain 10 detail                              
  BD-ID   Index   BSN  Age(min)  Learning  U-Forwrd  UU-Flood  Flooding  ARP-Term  BVI-Intf
   10       1      0     off        on        on        on        on       off       N/A   

           Interface           If-idx ISN  SHG  BVI  TxFlood        VLAN-Tag-Rewrite       
     GigabitEthernet3/0/0        1     1    0    -      *                 none             
    GigabitEthernet3/0/0.10      4     1    0    -      *                 pop-1  
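
To actually push tagged traffic through the dot1q sub-interface, the peer host needs a matching VLAN interface; a minimal sketch on the peer side (the eth0 name and the address are assumptions):
ip link add link eth0 name eth0.10 type vlan id 10
ip addr add 192.168.10.1/24 dev eth0.10
ip link set dev eth0.10 up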
    
#configure a veth pair (one end stays on the host, the other is handed to VPP; see the sketch after these commands)
sudo ip link add name vpp1out type veth peer name vpp1host
ip addr show vpp1host
ip link set dev vpp1out up
ip link set dev vpp1host up
ifconfig  #two new interfaces now show up: vpp1host and vpp1out
... (other interfaces omitted)
vpp1host  Link encap:Ethernet  HWaddr ce:75:a0:a1:7f:32  
          inet addr:192.168.3.128  Bcast:0.0.0.0  Mask:255.255.255.0
          inet6 addr: fe80::cc75:a0ff:fea1:7f32/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:8 errors:0 dropped:0 overruns:0 frame:0
          TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:648 (648.0 B)  TX bytes:648 (648.0 B)

vpp1out   Link encap:Ethernet  HWaddr 0a:9c:25:cc:e8:03  
          inet6 addr: fe80::89c:25ff:fecc:e803/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:8 errors:0 dropped:0 overruns:0 frame:0
          TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:648 (648.0 B)  TX bytes:648 (648.0 B)

ip addr add 192.168.3.128/24 dev vpp1host
ip addr show vpp1host
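
The veth pair only does something once the vpp1out end is attached to VPP as a host interface; a sketch of the follow-up CLI, where the 192.168.3.119/24 address is an assumption on the same subnet as vpp1host:
vpp# create host-interface name vpp1out
vpp# set interface state host-vpp1out up
vpp# set interface ip address host-vpp1out 192.168.3.119/24
#then, from the host shell, ping 192.168.3.119 goes out via vpp1host and is answered by VPP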

1.4: VPP plugin test run

#grab the existing plugin demo code and copy it into src/plugins/
root@ubuntu:/home/hlp/vpp/src/plugins# ls pkt*
pktdump.am

pktdump:
pktdump.c  pktdump.h  pktdump_node.c
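
The actual pktdump sources come from the course material. For orientation only, the main file of a VPP 18.01 plugin typically just registers itself with vlib, roughly like this (a minimal sketch, not the real pktdump.c):
/* minimal sketch in the 18.01 plugin style, not the actual course pktdump.c */
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

/* register the shared library as a VPP plugin so vlib loads it at startup */
VLIB_PLUGIN_REGISTER () = {
    .version = VPP_BUILD_VER,
    .description = "packet dump sample plugin",
};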
#add the build switch: add PLUGIN_ENABLED(pktdump) to src/configure.ac
root@ubuntu:/home/hlp/vpp/src# grep pktdump configure.ac
PLUGIN_ENABLED(pktdump)

#register the module in src/plugins/Makefile.am
if ENABLE_PKTDUMP_PLUGIN
include pktdump.am
endif
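
For reference, the pktdump.am fragment follows the same pattern as the other plugin .am files in 18.01; its contents would look roughly like this (assumed, based on the in-tree plugin fragments):
#sketch of pktdump.am, contents assumed
pktdump_plugin_la_SOURCES = pktdump/pktdump.c pktdump/pktdump_node.c
noinst_HEADERS += pktdump/pktdump.h
vppplugins_LTLIBRARIES += pktdump_plugin.la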

#build and run
make wipe
make build
#stop the packaged vpp service before running: service vpp stop
make run
DBGvpp# set interface state GigabitEthernet3/0/0 up
DBGvpp# set int ip address GigabitEthernet3/0/0 192.168.0.119/24
DBGvpp# show int address
GigabitEthernet13/0/0 (dn):
GigabitEthernet3/0/0 (up):
  192.168.0.119/24
GigabitEthernetb/0/0 (dn):
local0 (dn)
DBGvpp# pkt dump GigabitEthernet3/0/0

#######  ping 192.168.0.119 from another host and the dumped packets show up below; each line is a raw IPv4 packet starting at the IP header (0x45), and the lines with protocol byte 0x01 are the ICMP echo requests

DBGvpp# 45 00 00 91 00 00 40 00 40 11 79 b3 c0 a8 00 01 ff ff ff ff b7 9c 13 89 00 7d 41 57 01 01 0e 00 e1 2b 
45 00 00 3c 8e cb 00 00 40 01 69 c4 c0 a8 00 6a c0 a8 00 77 08 00 c3 31 00 02 8a 28 61 62 63 64 65 66 
45 00 00 3c 8e cc 00 00 40 01 69 c3 c0 a8 00 6a c0 a8 00 77 08 00 c3 30 00 02 8a 29 61 62 63 64 65 66 
45 00 00 3c 8e cd 00 00 40 01 69 c2 c0 a8 00 6a c0 a8 00 77 08 00 c3 2f 00 02 8a 2a 61 62 63 64 65 66 
45 00 00 3c 8e ce 00 00 40 01 69 c1 c0 a8 00 6a c0 a8 00 77 08 00 c3 2e 00 02 8a 2b 61 62 63 64 65 66 
45 00 00 3c 8e cf 00 00 40 01 69 c0 c0 a8 00 6a c0 a8 00 77 08 00 c3 2d 00 02 8a 2c 61 62 63 64 65 66

1.5: Summary

Problems encountered along the way, worth thinking about:

1: When running git clone, the download kept failing while the VM NIC was in NAT mode; switching to bridged mode made it work easily.

2: Installing directly from a VPP release tarball did not work; it was missing a version.h header (generated when the third-party libraries are downloaded), so do not install that way.

3: modprobe igb_uio fails because igb_uio is a DPDK module; it has to be built from the DPDK tree and then inserted into the kernel.

4: Hugepages can be configured in several ways; while debugging I found that 1G pages require mounting hugetlbfs manually.

5: When starting with "vpp -c /etc/vpp/startup.conf", watch the final output; judging from the error messages, the most likely failure is that a vpp service is already running in the background. Stop it with service vpp stop, then use vppctl to enter the CLI.

Reference: course exercises and source code from the C/C++ Linux server development / backend architect course by 零声教育 (Tencent Classroom, qq.com)
