Doesn't appear to be coming. The response I got was that SR-IOV failed their internal 1.0b BIOS testing and they disabled it. They also said the 1540 does not support SR-IOV (which is odd, because we know it works for sure on the 1GbE ports). I was basically told to use 1.0a if I want SR-IOV; I had an issue with 1.0a, which is why I updated in the first place. They also said they have no information on whether it will be re-enabled in a future release.

What a mess.
I just opened the package I ordered, and the board shipped with BIOS version 1.0b, so there is no SR-IOV option in the BIOS at all.
Any news on an upcoming version that re-enables it?
Regards
Markus
I was able to pass through the NICs, but they wouldn't work with DHCP even though they were plugged in, so I kind of stopped there. Not sure if it is a config issue or whether they just don't work with passthrough. Let us know how it goes.

That doesn't sound so good. So I will check whether passing the 10GbE NICs through to two VMs (NAS / firewall) is possible and sufficient for my use case.
I will try this over the next few weeks.
Regards
Markus
root@server:~ # pciconf -lv
...
ix0@pci0:3:0:0: class=0x020000 card=0x15ad15d9 chip=0x15ad8086 rev=0x00 hdr=0x00
vendor = 'Intel Corporation'
class = network
subclass = ethernet
ix1@pci0:3:0:1: class=0x020000 card=0x15ad15d9 chip=0x15ad8086 rev=0x00 hdr=0x00
vendor = 'Intel Corporation'
class = network
subclass = ethernet
igb0@pci0:5:0:0: class=0x020000 card=0x152115d9 chip=0x15218086 rev=0x01 hdr=0x00
vendor = 'Intel Corporation'
device = 'I350 Gigabit Network Connection'
class = network
subclass = ethernet
igb1@pci0:5:0:1: class=0x020000 card=0x152115d9 chip=0x15218086 rev=0x01 hdr=0x00
vendor = 'Intel Corporation'
device = 'I350 Gigabit Network Connection'
class = network
subclass = ethernet
...
root@server:~ # cat /boot/loader.conf
...
# vmm for bhyve
vmm_load="YES"
# PCI-10G Interfaces
pptdevs="3/0/0 3/0/1"
...
# REBOOT
root@server:~ # pciconf -lv
...
ppt0@pci0:3:0:0: class=0x020000 card=0x15ad15d9 chip=0x15ad8086 rev=0x00 hdr=0x00
vendor = 'Intel Corporation'
class = network
subclass = ethernet
ppt1@pci0:3:0:1: class=0x020000 card=0x15ad15d9 chip=0x15ad8086 rev=0x00 hdr=0x00
vendor = 'Intel Corporation'
class = network
subclass = ethernet
...
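# (Untested sketch) A quicker check than scanning the full pciconf listing is to filter
# for the ppt driver; after the reboot both 10GbE functions should show up:
pciconf -l | grep ^ppt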
# Create a test dataset
root@server:~ # zfs create -o mountpoint=/bhyve zroot/bhyve
# Create 2 test dirs (normally separate datasets would be used)
root@server:~ # mkdir /bhyve/vm1
root@server:~ # mkdir /bhyve/vm2
# Create a file for each virtual disk
root@server:/bhyve # truncate -s 16G /bhyve/vm1/guest.img
root@server:/bhyve # truncate -s 16G /bhyve/vm2/guest.img
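# (Untested alternative, not what I used) Instead of file-backed images on one dataset,
# each guest disk could also be a ZFS volume; the name vm1-disk is just an example:
zfs create -V 16G zroot/bhyve/vm1-disk
# bhyve would then get /dev/zvol/zroot/bhyve/vm1-disk as its disk device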
# Download the current installation image
root@server:/bhyve # fetch ftp://ftp.freebsd.org/pub/FreeBSD/releases/ISO-IMAGES/10.2/FreeBSD-10.2-RELEASE-amd64-bootonly.iso
# We need a tap device for each VM for internet access
root@server:/bhyve # ifconfig tap0 create
root@server:/bhyve # ifconfig tap1 create
root@server:/bhyve # sysctl net.link.tap.up_on_open=1
net.link.tap.up_on_open: 0 -> 1
root@server:/bhyve # ifconfig bridge0 create
root@server:/bhyve # ifconfig bridge0 addm igb0 addm tap0 addm tap1
root@server:/bhyve # ifconfig bridge0 up
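# (Untested sketch) To make the tap/bridge setup survive a reboot, something like this
# could go into /etc/rc.conf and /etc/sysctl.conf (device names as used above):
# /etc/rc.conf
cloned_interfaces="bridge0 tap0 tap1"
ifconfig_bridge0="addm igb0 addm tap0 addm tap1 up"
# /etc/sysctl.conf
net.link.tap.up_on_open=1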
# Quick and dirty install / run with vmrun.sh
root@server:/bhyve # sh /usr/share/examples/bhyve/vmrun.sh
Usage: vmrun.sh [-ahi] [-c <CPUs>] [-C <console>] [-d <disk file>]
[-e <name=value>] [-g <gdbport> ] [-H <directory>]
[-I <location of installation iso>] [-m <memsize>]
[-t <tapdev>] <vmname>
-h: display this help message
-a: force memory mapped local APIC access
-c: number of virtual cpus (default is 2)
-C: console device (default is stdio)
-d: virtio diskdev file (default is ./diskdev)
-e: set FreeBSD loader environment variable
-g: listen for connection from kgdb at <gdbport>
-H: host filesystem to export to the loader
-i: force boot of the Installation CDROM image
-I: Installation CDROM image location (default is ./release.iso)
-m: memory size (default is 512M)
-p: pass-through a host PCI device at bus/slot/func (e.g. 10/0/0)
-t: tap device for virtio-net (default is tap0)
*** virtual machine name not specified
# Start the installation
root@server:/bhyve # sh /usr/share/examples/bhyve/vmrun.sh -c 4 -m 8192 -d vm1/guest.img -t tap0 -i -I FreeBSD-10.2-RELEASE-amd64-bootonly.iso vm1
# After the installation, reboot / power off the VM to drop back out of bhyve to the host shell
# Do the same with vm2
# Start the VMs again, this time adding the PCI passthrough device
# At the moment I have no idea how to exit the vmrun script cleanly - I always power off the VM and work in several terminal windows over SSH - sorry for this
root@server:/bhyve # sh /usr/share/examples/bhyve/vmrun.sh -c 4 -m 8192M -d vm1/guest.img -t tap0 -p 3/0/0 vm1
root@server:/bhyve # sh /usr/share/examples/bhyve/vmrun.sh -c 4 -m 8192M -d vm2/guest.img -t tap1 -p 3/0/1 vm2
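# (Untested sketch) vmrun.sh is only a wrapper; the raw calls for vm1 would look roughly
# like this - bhyveload loads the kernel, then bhyve gets one -s slot per emulated device
# plus the passthru slot for 3/0/0 (the slot numbers here are arbitrary choices):
bhyveload -m 8192M -d vm1/guest.img vm1
bhyve -c 4 -m 8192M -A -H -P -S \
  -s 0,hostbridge -s 1,lpc -s 2,virtio-net,tap0 -s 3,virtio-blk,vm1/guest.img \
  -s 4,passthru,3/0/0 -l com1,stdio vm1
# -S wires the guest memory, which PCI passthrough needs
# A stuck VM instance can be torn down from the host with:
bhyvectl --destroy --vm=vm1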
# And there are the passed-through interfaces inside the guests
root@vm1:~ # ifconfig
vtnet0: flags=8943<UP,BROADCAST,RUNNING,PROMISC,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=80028<VLAN_MTU,JUMBO_MTU,LINKSTATE>
ether 00:a0:98:27:a7:18
inet 192.168.2.205 netmask 0xffffff00 broadcast 192.168.2.255
nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
media: Ethernet 10Gbase-T <full-duplex>
status: active
ix0: flags=8802<BROADCAST,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=8407bb<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,JUMBO_MTU,VLAN_HWCSUM,TSO4,TSO6,LRO,VLAN_HWTSO>
ether 0c:c4:7a:7a:5a:26
nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
media: Ethernet autoselect
status: no carrier
lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384
options=600003<RXCSUM,TXCSUM,RXCSUM_IPV6,TXCSUM_IPV6>
inet6 ::1 prefixlen 128
inet6 fe80::1%lo0 prefixlen 64 scopeid 0x3
inet 127.0.0.1 netmask 0xff000000
nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL>
root@vm2:~ # ifconfig
vtnet0: flags=8943<UP,BROADCAST,RUNNING,PROMISC,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=80028<VLAN_MTU,JUMBO_MTU,LINKSTATE>
ether 00:a0:98:27:42:4c
inet 192.168.2.225 netmask 0xffffff00 broadcast 192.168.2.255
nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
media: Ethernet 10Gbase-T <full-duplex>
status: active
ix0: flags=8802<BROADCAST,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=8407bb<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,JUMBO_MTU,VLAN_HWCSUM,TSO4,TSO6,LRO,VLAN_HWTSO>
ether 0c:c4:7a:7a:5a:27
nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
media: Ethernet autoselect
status: no carrier
lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384
options=600003<RXCSUM,TXCSUM,RXCSUM_IPV6,TXCSUM_IPV6>
inet6 ::1 prefixlen 128
inet6 fe80::1%lo0 prefixlen 64 scopeid 0x3
inet 127.0.0.1 netmask 0xff000000
nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL>
# Connect ix0 to the local network (only 1 GbE there) and try to ping vm1 from an external host
root@vm1:~ # ifconfig ix0 up
root@vm1:~ # dhclient ix0
DHCPDISCOVER on ix0 to 255.255.255.255 port 67 interval 4
DHCPOFFER from 192.168.2.123
DHCPREQUEST on ix0 to 255.255.255.255 port 67
DHCPACK from 192.168.2.123
# YEAH
macbook-m-lan:2015 markus$ ping 192.168.2.206
PING 192.168.2.206 (192.168.2.206): 56 data bytes
64 bytes from 192.168.2.206: icmp_seq=0 ttl=64 time=0.371 ms
64 bytes from 192.168.2.206: icmp_seq=1 ttl=64 time=0.422 ms
64 bytes from 192.168.2.206: icmp_seq=2 ttl=64 time=0.426 ms
# OK this works
# Now interconnect the two VMs and let's get serious
root@vm1:~ # ifconfig ix0 inet 10.10.10.1
root@vm1:~ # ifconfig
vtnet0: flags=8943<UP,BROADCAST,RUNNING,PROMISC,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=80028<VLAN_MTU,JUMBO_MTU,LINKSTATE>
ether 00:a0:98:27:a7:18
inet 192.168.2.205 netmask 0xffffff00 broadcast 192.168.2.255
nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
media: Ethernet 10Gbase-T <full-duplex>
status: active
ix0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=8407bb<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,JUMBO_MTU,VLAN_HWCSUM,TSO4,TSO6,LRO,VLAN_HWTSO>
ether 0c:c4:7a:7a:5a:26
inet 10.10.10.1 netmask 0xff000000 broadcast 10.255.255.255
nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
media: Ethernet autoselect (10Gbase-T <full-duplex,rxpause,txpause>)
status: active
lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384
options=600003<RXCSUM,TXCSUM,RXCSUM_IPV6,TXCSUM_IPV6>
inet6 ::1 prefixlen 128
inet6 fe80::1%lo0 prefixlen 64 scopeid 0x3
inet 127.0.0.1 netmask 0xff000000
nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL>
root@vm2:~ # ifconfig ix0 inet 10.10.10.2
root@vm2:~ # ifconfig
vtnet0: flags=8943<UP,BROADCAST,RUNNING,PROMISC,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=80028<VLAN_MTU,JUMBO_MTU,LINKSTATE>
ether 00:a0:98:27:42:4c
inet 192.168.2.225 netmask 0xffffff00 broadcast 192.168.2.255
nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
media: Ethernet 10Gbase-T <full-duplex>
status: active
ix0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500
options=8407bb<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,JUMBO_MTU,VLAN_HWCSUM,TSO4,TSO6,LRO,VLAN_HWTSO>
ether 0c:c4:7a:7a:5a:27
inet 10.10.10.2 netmask 0xff000000 broadcast 10.255.255.255
nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL>
media: Ethernet autoselect (10Gbase-T <full-duplex,rxpause,txpause>)
status: active
lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384
options=600003<RXCSUM,TXCSUM,RXCSUM_IPV6,TXCSUM_IPV6>
inet6 ::1 prefixlen 128
inet6 fe80::1%lo0 prefixlen 64 scopeid 0x3
inet 127.0.0.1 netmask 0xff000000
nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL>
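# (Untested sketch) The /8 netmask above is only the default for a bare "inet 10.10.10.x".
# A persistent variant with an explicit prefix would go into each VM's /etc/rc.conf:
# vm1: ifconfig_ix0="inet 10.10.10.1/24"
# vm2: ifconfig_ix0="inet 10.10.10.2/24"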
# Install iperf and test
root@vm2:~ # iperf -s
------------------------------------------------------------
Server listening on TCP port 5001
TCP window size: 64.0 KByte (default)
------------------------------------------------------------
root@vm1:~ # iperf -c 10.10.10.2
------------------------------------------------------------
Client connecting to 10.10.10.2, TCP port 5001
TCP window size: 32.5 KByte (default)
------------------------------------------------------------
[ 3] local 10.10.10.1 port 36248 connected with 10.10.10.2 port 5001
[ ID] Interval Transfer Bandwidth
[ 3] 0.0-10.0 sec 10.9 GBytes 9.38 Gbits/sec
# Doesn't look bad...
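# (Untested sketch) To check whether a single TCP stream is the limit, a longer run with
# several parallel streams could be tried; -P sets the stream count, -t the duration:
iperf -c 10.10.10.2 -P 4 -t 30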
# Create a file to transmit over ssh
root@vm2:~ # dd if=/dev/zero of=myfile.dat bs=1G count=10
root@vm1:~ # scp 10.10.10.2:myfile.dat .
Password for root@vm2:
myfile.dat 100% 10GB 148.4MB/s 01:09
# I have no clue whether this is fast or not (sshd on vm2 at about 30% load, ssh+scp on vm1 at about 90%, according to top)
# It can probably be improved with a better-suited cipher (AES-NI accelerated) and a RAM disk on both sides
root@vm1:/mnt/vm1 # scp -c aes128-gcm@openssh.com 10.10.10.2:/mnt/myfile.dat .
Password for root@vm2:
myfile.dat 100% 1024MB 256.0MB/s 00:04
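# (Untested sketch) Before blaming the cipher it may be worth confirming that the guest CPU
# exposes AES-NI and what OpenSSL gets out of it (OpenSSL uses AES-NI directly in userland):
grep AESNI /var/run/dmesg.boot
openssl speed -evp aes-128-cbc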
# OK, time for NFS
# vm2 will be the server
# Sorry, no step-by-step here - it didn't work at first, so the log would be full of debugging noise
# A single copy of the file created above was slower than scp - about 120 MB/s :( - it seems the consumer SSD is not so good
# Next try: two RAM disks
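# (Untested sketch of the omitted steps) The RAM disks and the NFS mount could look roughly
# like this; the 4g size and the export options are made up for illustration:
# on vm2 (NFS server): memory-backed filesystem on /mnt, export it, start the NFS services
mdmfs -s 4g md /mnt
echo '/mnt -maproot=root' >> /etc/exports
sysrc rpcbind_enable=YES mountd_enable=YES nfs_server_enable=YES
service rpcbind start && service mountd start && service nfsd start
# on vm1 (client): a local RAM disk plus the NFS mount of vm2
mkdir -p /mnt/vm1 /mnt/vm2
mdmfs -s 4g md /mnt/vm1
mount 10.10.10.2:/mnt /mnt/vm2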
root@vm1:/mnt # ls -l /mnt/
total 8
drwxr-xr-x 3 root wheel 512 Nov 9 21:17 vm1
drwxr-xr-x 3 root wheel 512 Nov 9 21:02 vm2
root@vm1:/mnt # time cp /mnt/vm2/myfile.dat /mnt/vm1/
0.014u 3.300s 0:11.81 28.0% 21+178k 9+24576io 0pf+0w
# So I get roughly 260 MB/s => 3221225472 bytes / 1024 / 1024 / 11.81 s ~ 260 MB/s ~ 2 Gbit/s, over a 1.5 m Cat 5 cable (sorry, I have nothing faster here)
# That seems to be the limit without any further tuning
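# (Untested sketch) If one wanted to push further: since iperf already showed ~9.4 Gbit/s raw,
# the remaining gap is probably in NFS itself, and the usual first knobs are a TCP mount with
# larger block sizes (the 65536 values are just example numbers):
mount -t nfs -o tcp,nfsv3,rsize=65536,wsize=65536 10.10.10.2:/mnt /mnt/vm2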
Sorry, but SR-IOV support in FreeBSD is kind of "strange". I would have to apply several patches, and that is far beyond my experience.

Any luck with SR-IOV in FreeBSD?
Thanks @Patrick, this will end the wild goose chase, put the matter to rest, and let us move on.

Guys - I am getting a clarification from Intel and had one follow-up question. Stay tuned, as I will try to get something on the main site later today.
root@pve-dol1:~# lspci -nnk -s 07:10
07:10.0 Ethernet controller [0200]: Intel Corporation I350 Ethernet Controller Virtual Function [8086:1520] (rev 01)
	Subsystem: Super Micro Computer Inc Device [15d9:1521]
	Kernel driver in use: vfio-pci
07:10.1 Ethernet controller [0200]: Intel Corporation I350 Ethernet Controller Virtual Function [8086:1520] (rev 01)
	Subsystem: Super Micro Computer Inc Device [15d9:1521]
	Kernel driver in use: vfio-pci
07:10.4 Ethernet controller [0200]: Intel Corporation I350 Ethernet Controller Virtual Function [8086:1520] (rev 01)
	Subsystem: Super Micro Computer Inc Device [15d9:1521]
	Kernel driver in use: vfio-pci
07:10.5 Ethernet controller [0200]: Intel Corporation I350 Ethernet Controller Virtual Function [8086:1520] (rev 01)
	Subsystem: Super Micro Computer Inc Device [15d9:1521]
	Kernel driver in use: vfio-pci
root@pve-dol1:~# cat /etc/pve/qemu-server/899.conf
bootdisk: scsi0
cores: 1
cpu: host
hostpci0: 07:10.1,pcie=1
ide2: none,media=cdrom
machine: q35
memory: 1024
name: testonly
numa: 0
ostype: l26
scsi0: vm:vm-899-disk-1,size=32G
smbios1: uuid=f3b7c95b-add1-41d0-a577-7279059ce495
sockets: 1
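# (Untested sketch) For reference, the I350 virtual functions shown above are usually
# created on the Proxmox host either at runtime via sysfs or via the igb module option;
# the interface name eth2 is only a placeholder:
echo 4 > /sys/class/net/eth2/device/sriov_numvfs
# or persistently, e.g. in /etc/modprobe.d/igb.conf:
# options igb max_vfs=4
# (the host also needs the IOMMU enabled, e.g. intel_iommu=on on the kernel command line)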