D11401: Kernel pf tests
D11401.id32568.diff (59 KB)
Index: etc/mtree/BSD.tests.dist
===================================================================
--- etc/mtree/BSD.tests.dist
+++ etc/mtree/BSD.tests.dist
@@ -470,6 +470,12 @@
..
netinet
..
+ netpfil
+ pf
+ files
+ ..
+ ..
+ ..
opencrypto
..
pjdfstest
Index: targets/pseudo/tests/Makefile.depend
===================================================================
--- targets/pseudo/tests/Makefile.depend
+++ targets/pseudo/tests/Makefile.depend
@@ -234,6 +234,8 @@
tests/sys/mac/portacl \
tests/sys/mqueue \
tests/sys/netinet \
+ tests/sys/netpfil \
+ tests/sys/netpfil/pf \
tests/sys/opencrypto \
tests/sys/pjdfstest/tests \
tests/sys/pjdfstest/tests/chflags \
Index: tests/sys/Makefile
===================================================================
--- tests/sys/Makefile
+++ tests/sys/Makefile
@@ -13,6 +13,7 @@
TESTS_SUBDIRS+= mac
TESTS_SUBDIRS+= mqueue
TESTS_SUBDIRS+= netinet
+TESTS_SUBDIRS+= netpfil
TESTS_SUBDIRS+= opencrypto
TESTS_SUBDIRS+= posixshm
TESTS_SUBDIRS+= sys
Index: tests/sys/netpfil/Kyuafile
===================================================================
--- /dev/null
+++ tests/sys/netpfil/Kyuafile
@@ -0,0 +1,52 @@
+-- $FreeBSD$
+--
+-- Copyright 2011 Google Inc.
+-- All rights reserved.
+--
+-- Redistribution and use in source and binary forms, with or without
+-- modification, are permitted provided that the following conditions are
+-- met:
+--
+-- * Redistributions of source code must retain the above copyright
+-- notice, this list of conditions and the following disclaimer.
+-- * Redistributions in binary form must reproduce the above copyright
+-- notice, this list of conditions and the following disclaimer in the
+-- documentation and/or other materials provided with the distribution.
+-- * Neither the name of Google Inc. nor the names of its contributors
+-- may be used to endorse or promote products derived from this software
+-- without specific prior written permission.
+--
+-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+-- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+-- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+-- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+-- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+-- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+-- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+-- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+-- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+-- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+-- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-- Automatically recurses into any subdirectory that holds a Kyuafile.
+-- As such, this Kyuafile is suitable for installation into the root of
+-- the tests hierarchy as well as into any other subdirectory that needs
+-- "auto-discovery" of tests.
+--
+-- This file is based on the Kyuafile.top sample file distributed in the
+-- kyua-cli package.
+
+syntax(2)
+
+local directory = fs.dirname(current_kyuafile())
+for file in fs.files(directory) do
+ if file == "." or file == ".." then
+ -- Skip these special entries.
+ else
+ local kyuafile_relative = fs.join(file, "Kyuafile")
+ local kyuafile_absolute = fs.join(directory, kyuafile_relative)
+ if fs.exists(kyuafile_absolute) then
+ include(kyuafile_relative)
+ end
+ end
+end
Index: tests/sys/netpfil/Makefile
===================================================================
--- /dev/null
+++ tests/sys/netpfil/Makefile
@@ -0,0 +1,9 @@
+# $FreeBSD$
+
+PACKAGE= tests
+
+TESTSDIR= ${TESTSBASE}/sys/netpfil
+TESTS_SUBDIRS+= pf
+KYUAFILE= yes
+
+.include <bsd.test.mk>
Index: tests/sys/netpfil/pf/Makefile
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/Makefile
@@ -0,0 +1,16 @@
+# $FreeBSD$
+
+PACKAGE= tests
+
+TESTSDIR= ${TESTSBASE}/sys/netpfil/pf
+BINDIR= ${TESTSDIR}
+
+ATF_TESTS_SH= pf_test_block_drop \
+ pf_test_block_return \
+ pf_test_scrub_forward \
+ pf_test_scrub_forward6
+
+${PACKAGE}FILES+= README
+SUBDIR+= files
+
+.include <bsd.test.mk>
Index: tests/sys/netpfil/pf/README
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/README
@@ -0,0 +1,204 @@
+FreeBSD test suite - pf
+=======================
+
+(Current as of 20170828.)
+
+These tests use kyua and ATF. They create and run VMs with bhyve, and
+use these machines to run various test scenarios.
+
+
+Requirements
+------------
+
+The tests require that the host supports bhyve and the following
+kernel modules (an example kldload(8) invocation follows the list):
+
+* bridgestp - for bridge networking for inter-virtual machine
+ communication
+* if_bridge - for bridge networking
+* if_tap - for host-to-virtual machine communication
+* nmdm - for virtual serial consoles
+* vmm - for running virtual machines
+* zfs - for creating and cloning virtual machine images
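+
+All of these can be loaded with kldload(8), for example:
+
+  # kldload -n bridgestp if_bridge if_tap nmdm vmm zfs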
+
+The tests require that a ZFS storage pool exists on the system named
+"zroot", and use the subtree under "zroot/tests/pf" to clone and
+configure VM images to be used with bhyve.
+
+The tests also require an active internet connection: the required
+packages are installed during creation of the virtual machine base
+image, since many of the test scenarios use scapy for traffic
+generation and inspection.
+
+
+Names and numbers used
+----------------------
+
+The following names and numbers have been chosen for use in the tests,
+as a way to avoid collisions with other installed packages on the host
+system:
+
+* Bhyve guests: all virtual machines are named tests-pf-*
+* Bridge interfaces: bridge6555 to bridge6558
+* File system: the mountpoint of ZFS dataset "zroot/tests/pf" is used
+* IPv4 addresses: block 10.135.213.0/24 is used
+* IPv6 addresses: block fd22:27ca:58fe::/48 is used
+* Installation location: /usr/tests/sys/netpfil/pf
+* Mount points: /mnt/tests/pf is used
+* Tap interfaces: tap19302 to tap19317
+* Virtual consoles: /dev/nmdmtests-pf-*[AB]
+
+
+Required packages
+-----------------
+
+The following packages are automatically installed on the virtual
+machine base image. No packages are installed on the host machine.
+
+* python2.7
+* scapy
+
+
+Installation
+------------
+
+The tests are shipped as part of the FreeBSD source. They are
+installed as part of the FreeBSD test suite, which is installed by
+default when installing FreeBSD from source starting with version 11.
+To install the tests manually:
+
+ % cd {sourcedir} (/usr/src or other)
+  % cd tests/sys/netpfil/pf
+ % make
+ # make install (as root)
+
+The tests should appear under /usr/tests/sys/netpfil/pf. If they do
+not, you may need to create the directory hierarchy manually:
+
+ # mkdir -p /usr/tests/sys/netpfil/pf (as root)
+
+The hierarchy is created automatically when the tests are installed as
+part of the complete test suite, which is recommended.
+
+
+First time preparation
+----------------------
+
+Before being able to run any tests, the virtual machine base image
+needs to be created. To do that, as root:
+
+ # /usr/tests/sys/netpfil/pf/files/make_baseimg.sh {sourcedir}
+
+{sourcedir} can be /usr/src or anywhere the FreeBSD source is
+installed.
+
+make_baseimg.sh will rebuild world and kernel and create a FreeBSD
+ready-to-run image. It will then create a ZFS dataset under "zroot"
+named "zroot/tests/pf" and copy the image there for the tests to be
+able to find.
+
+
+Running tests
+-------------
+
+The tests use kyua and ATF, and the usual administration commands work
+here as well. To run the tests, first:
+
+ % cd /usr/tests/sys/netpfil/pf
+
+Then, to list all available tests:
+
+ % kyua list
+
+To run all the tests:
+
+ % kyua test
+
+To run a specific test:
+
+ % kyua test {desired_test}
+
+Note that only one test can be run at a time! That is because all
+tests use the same names for their virtual machines, so running
+multiple tests in parallel would make them collide. Running all the
+tests as above is safe, since kyua runs them one by one.
+
+
+Architecture of the tests
+-------------------------
+
+The tests use the test frameworks kyua and ATF, so every execution of
+a test gets its own empty temporary working directory, which is
+cleaned up afterwards by the test framework. The tests also have
+access to the "source directory", which is the installation directory
+where the kyua command is issued. This directory is used for reading
+configuration and for running helper scripts and functions.
+
+Before running any of the tests, the virtual machine base image needs
+to be created. It is placed in the "zroot/tests/pf" ZFS dataset,
+making it easy for the tests to find. This image is cloned by the
+control script every time a test is run.
+
+The main test script is pf_test, which is run by the framework. This
+script cooperates with the VM control script vmctl.sh via utility
+functions in pf_test_util.sh. The main script takes care of network
+device allocation, address assignment, and VM naming, while the VM
+control script takes care of creating and configuring VM images and
+starting and stopping VMs. These two scripts communicate via command
+line arguments and local files created in the ATF working directory.
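+
+As an illustration, the create call issued by vm_create() in
+pf_test_util.sh looks like this (VM name and interface list vary per
+test):
+
+  vmctl.sh create client zroot/tests/pf \
+      /dev/nmdmtests-pf-clientB tap19302 tap19303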
+
+All VMs need at least one interface to run SSH on, and preferably more
+interfaces for running tests. The VM control script vmctl.sh receives
+the list of interfaces from pf_test, configures them such that SSH is
+enabled on the first interface, and writes out login information in
+local files for pf_test to read back.
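+
+For example, after vm_create has set up a VM named "client", the
+working directory contains files like these (names taken from
+vmctl.sh):
+
+  vmctl.client.sshlogin   SSH login string, e.g. root@10.135.213.2
+  vmctl.client.id_rsa     SSH identity that ssh_cmd() passes to ssh -i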
+
+The pf_test script needs to wait for the VMs to boot up and get ready.
+This takes between 60 and 120 seconds, and depends on various factors
+such as the number of network interfaces for the VM. To work around
+this, there is currently a hardcoded call to sleep(1) for each
+individual test.
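+
+For example, block_drop currently waits with "atf_check sleep 60",
+while the scrub_forward tests, which configure more interfaces per
+VM, wait with "atf_check sleep 120".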
+
+When the VMs are up and running, the main script pf_test uses the SSH
+connections to perform further configuration before the tests start. It
+also takes care of wiring the VMs according to the test scenario,
+using local bridge interfaces.
+
+Typically, there will be two VMs, one running pf and one generating
+traffic. Each VM will have two interfaces, one for running SSH and
+one for connecting to the other VM via a network bridge running on the
+host. But more complicated scenarios than this can also be created.
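+
+As an illustration, the block_drop test wires its two VMs as follows
+(VM interfaces on the left, host interfaces on the right):
+
+  client vtnet0 -- tap19302                    (SSH)
+  server vtnet0 -- tap19304                    (SSH)
+  client vtnet1 -- tap19303 --+
+                              +-- bridge6555   (test traffic)
+  server vtnet1 -- tap19305 --+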
+
+The installation directory contains, under files/, various test
+scripts written in Python with scapy, which pf_test can upload to the
+VMs. The test scripts usually need configuration, which is also
+uploaded to the VMs by pf_test. The host itself, running pf_test and
+vmctl.sh, does not run any test directly; that is, it does not run
+pfctl or pf, and it does not generate any traffic. Only the VMs do.
+
+Tearing down and cleaning up after testing is done by pf_test, which
+delegates VM destruction to vmctl.sh and cleans up everything else by
+itself.
+
+
+Future work
+-----------
+
+Below are some areas of improvement for the pf tests:
+
+* Ability to run multiple tests simultaneously: The main issue is
+ naming of the virtual machines, but other issues might also need
+ attention.
+
+* Start-up of virtual machines: Currently the tests wait for a
+ predefined amount of time, hardcoded as a call to sleep(1), until
+ the virtual machines are done booting. A way to directly check the
+ status of a virtual machine would be desirable. Perhaps by reading
+ from the virtual console?
+
+* Ease of creating new tests: Currently there is a lot of boilerplate
+ code when creating a test (in the body and cleanup function of the
+ test). A way to simplify this would be desirable. Perhaps create a
+ set of main test cases holding default configuration, that can be
+ branched off and modified?
Index: tests/sys/netpfil/pf/files/Makefile
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/Makefile
@@ -0,0 +1,13 @@
+# $FreeBSD$
+
+TESTSDIR= ${TESTSBASE}/sys/netpfil/pf/files
+BINDIR= ${TESTSDIR}
+
+FILES= pf_test_conf.sh pf_test_util.sh \
+ scrub_forward.py scrub6.py conf.py util.py \
+ make_baseimg.sh vmctl.sh
+
+FILESMODE_make_baseimg.sh= 0555
+FILESMODE_vmctl.sh= 0555
+
+.include <bsd.progs.mk>
Index: tests/sys/netpfil/pf/files/conf.py
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/conf.py
@@ -0,0 +1,13 @@
+# python2
+
+# Read conf variables from pf_test_conf.sh.
+
+conffile = open('pf_test_conf.sh')
+
+for line in conffile:
+ # Simple test that line is of the form var=val.
+ if len(line.split('=', 1)) == 2:
+ # This will also execute comment lines, but since comment
+ # syntax for Python is the same as for shell scripts, it isn't
+ # a problem.
+ exec(line)
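+
+# For example, the line "PYTHON2='python2.7'" in pf_test_conf.sh is
+# both valid shell and valid Python, so the exec() above defines it as
+# an attribute of this conf module.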
Index: tests/sys/netpfil/pf/files/make_baseimg.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/make_baseimg.sh
@@ -0,0 +1,98 @@
+#! /bin/sh
+
+# make_baseimg.sh - create base image file for tests needing VMs.
+#
+# make_baseimg.sh creates a base image file from source. It needs to
+# be pointed to the source directory. It uses the source build system
+# to create an image and then installs needed packages in it.
+#
+# make_baseimg.sh should be run as root.
+
+name="make_baseimg.sh"
+packages="python27 scapy"
+
+# Change this to point to the source directory.
+sourcedir="${1}"
+
+[ -z "${sourcedir}" ] && {
+ echo "Usage: ${0} {sourcedir}" >&2
+ exit 1
+}
+
+error () {
+ echo "${name}: ${1}" >&2
+}
+
+error_exit () {
+ error "${1}"
+ exit 1
+}
+
+ncpu="$(sysctl -n hw.ncpu)"
+baseimg="zroot/tests/pf/baseimg"
+mountdir="/mnt/tests/pf/baseimg"
+
+cd "${sourcedir}" || error_exit "Cannot access source directory ${sourcedir}."
+#make -j "${ncpu}" buildworld || exit 1
+#make -j "${ncpu}" buildkernel || exit 1
+
+cd release || error_exit "Cannot access release/ directory in source directory."
+# TODO Instead of make clean, use an alternative target directory.
+#make clean || exit 1
+
+sourcedir_canon="$(readlink -f ${sourcedir})"
+
+# Force rebuilding by make release.
+chflags -R noschg "/usr/obj${sourcedir_canon}/release" ||
+ error_exit "Could not run chflags on \
+/usr/obj${sourcedir_canon}/release, wrong object directory?"
+rm -fr "/usr/obj${sourcedir_canon}/release" ||
+ error_exit "Could not remove /usr/obj${sourcedir_canon}/release, \
+wrong object directory?"
+
+make release || error_exit "Cannot run 'make release'."
+make vm-image \
+ WITH_VMIMAGES="1" VMBASE="vm-tests-pf" \
+ VMFORMATS="raw" VMSIZE="3G" ||
+ error_exit "Cannot run 'make vm-image'."
+
+cd "/usr/obj${sourcedir_canon}/release" ||
+ error_exit "Cannot access /usr/obj${sourcedir_canon}/release, \
+wrong object directory?"
+zfs create -p "${baseimg}" ||
+ error_exit "Cannot create ZFS dataset ${baseimg}, \
+is 'zroot' available?"
+
+zmountbase="$(zfs get -H -o value mountpoint "${baseimg}")" ||
+ error_exit "Cannot get mountpoint of dataset ${baseimg}!"
+
+install -o root -g wheel -m 0644 \
+ "vm-tests-pf.raw" "${zmountbase}/img" ||
+ error_exit "Cannot copy image file to ZFS dataset."
+
+mkdir -p "${mountdir}" ||
+ error_exit "Cannot create mountpoint ${mountdir}."
+md="$(mdconfig ${zmountbase}/img)" ||
+ error_exit "Cannot create memory disk for ${zmountbase}/img."
+(
+	mount "/dev/${md}p3" "${mountdir}" || {
+		error "Cannot mount /dev/${md}p3 on ${mountdir}, \
+image file malformed?"
+		exit 1
+	}
+	(
+		chroot "${mountdir}" \
+		    env ASSUME_ALWAYS_YES="yes" \
+		    pkg install ${packages} || {
+			error "Cannot install packages into image file, \
+is there an active internet connection?"
+			exit 1
+		}
+	)
+	status="$?"
+	umount "${mountdir}"
+	exit "${status}"
+)
+status="$?"
+mdconfig -du "${md}"
+exit "${status}"
Index: tests/sys/netpfil/pf/files/pf_test_conf.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/pf_test_conf.sh
@@ -0,0 +1,3 @@
+# pf_test_conf.sh - common configuration for tests.
+
+PYTHON2='python2.7'
Index: tests/sys/netpfil/pf/files/pf_test_util.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/pf_test_util.sh
@@ -0,0 +1,148 @@
+# pf_test_util.sh - utility functions.
+
+. "$(atf_get_srcdir)/files/pf_test_conf.sh"
+
+PF_TEST_DIR="$(atf_get_srcdir)"
+export PF_TEST_DIR
+
+PATH="${PF_TEST_DIR}/files:${PATH}"
+export PATH
+
+# pair_create () {
+# for i in "$@" ; do
+# ifpair="epair${i}"
+# addra="PAIR_${i}_ADDR_A"
+# addrb="PAIR_${i}_ADDR_B"
+# netmask="PAIR_${i}_NETMASK"
+# addr6a="PAIR_${i}_ADDR6_A"
+# addr6b="PAIR_${i}_ADDR6_B"
+# prefixlen="PAIR_${i}_PREFIXLEN"
+# ifconfig "${ifpair}" create
+# eval "ifconfig ${ifpair}a inet \$${addra} netmask \$${netmask}"
+# eval "ifconfig ${ifpair}a inet6 \$${addr6a} prefixlen \$${prefixlen}"
+# eval "ifconfig ${ifpair}b inet \$${addrb} netmask \$${netmask}"
+# eval "ifconfig ${ifpair}b inet6 \$${addr6b} prefixlen \$${prefixlen}"
+# done
+# }
+
+# pair_destroy () {
+# for i in "$@" ; do
+# ifpair="epair${i}"
+# ifconfig "${ifpair}a" destroy
+# done
+# }
+
+# scp_cmd () {
+# vm="${1}" &&
+# sshlogin="$(cat vmctl.${vm}.sshlogin)" &&
+# echo "scp -q -o StrictHostKeyChecking=no \
+# -i vmctl.${vm}.id_rsa ${sshlogin}"
+# }
+
+# ssh_cmd - print SSH command for connecting to virtual machine.
+ssh_cmd () {
+ vm="${1}"
+ sshlogin="$(cat vmctl.${vm}.sshlogin)" || {
+ echo "Could not read SSH login info for VM ${vm}!" >&2
+ return 1
+ }
+ echo "ssh -q -o StrictHostKeyChecking=no \
+ -i vmctl.${vm}.id_rsa ${sshlogin}"
+}
+
+# ssh_login () {
+# vm="${1}"
+# cat "vmctl.${vm}.sshlogin"
+# }
+
+# tap_create - configure tap interface on host machine with matching
+# vtnet interface on virtual machine.
+tap_create () {
+ vm="${1}"
+ tap="${2}"
+ tap_inet="${3}"
+ vtnet="${4}"
+ vtnet_inet="${5}"
+ atf_check ifconfig "${tap}" create inet "${tap_inet}" link0
+ echo "ifconfig_${vtnet}=\"inet ${vtnet_inet}\"" >> \
+ "vmctl.${vm}.rcappend"
+}
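+# Example (as used by the tests): host-side tap19302 at
+# 10.135.213.1/28 paired with the VM's vtnet0 at 10.135.213.2/28:
+#   tap_create client tap19302 10.135.213.1/28 vtnet0 10.135.213.2/28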
+
+# tap6_create - configure tap interface on host machine with matching
+# vtnet interface on virtual machine, IPv6 version.
+tap6_create () {
+ vm="${1}"
+ tap="${2}"
+ tap_inet6="${3}"
+ vtnet="${4}"
+ vtnet_inet6="${5}"
+ atf_check ifconfig "${tap}" create inet6 "${tap_inet6}" link0
+ (
+ echo "ifconfig_${vtnet}=\"inet 0.0.0.0/8\""
+ echo "ifconfig_${vtnet}_ipv6=\"inet6 ${vtnet_inet6}\""
+ ) >> "vmctl.${vm}.rcappend"
+}
+
+# bridge_create - create bridge interface for communication between
+# virtual machines.
+bridge_create () {
+ iface="${1}"
+ shift 1 || atf_fail "bridge_create(): No bridge interface specified."
+ atf_check ifconfig "${iface}" create
+ for i in "$@" ; do
+ atf_check ifconfig "${iface}" addm "${i}"
+ atf_check ifconfig "${iface}" stp "${i}"
+ done
+ atf_check ifconfig "${iface}" up
+}
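+# Example (as used by the tests): bridge the tap interfaces of two VMs
+# so that the VMs can reach each other:
+#   bridge_create bridge6555 tap19303 tap19305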
+
+# vm_create - create and start a virtual machine.
+vm_create () {
+ vm="${1}"
+ shift 1 || atf_fail "vm_create(): No VM specified."
+ # Rest of arguments is network (tap) interfaces.
+ #echo "==== BEGIN ${vm} ====" >&2
+ #cat "vmctl.${vm}.rcappend" >&2
+ #echo "==== END ${vm} ====" >&2
+ vmctl.sh create "${vm}" "zroot/tests/pf" \
+ "/dev/nmdmtests-pf-${vm}B" "$@"
+ case "$?" in
+ (0) ;;
+ (2) atf_skip "Cannot run bhyve, support lacking?" ;;
+ (*) atf_fail "vm_create(): vmctl.sh error." ;;
+ esac
+ # If all went well, valid SSH configuration should have been
+ # created.
+ ssh_cmd_vm="$(ssh_cmd "${vm}")"
+ atf_check [ "x${ssh_cmd_vm}" '!=' "x" ]
+}
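+# Example (as used by the tests): the first tap carries SSH, any
+# following taps carry test traffic:
+#   vm_create client tap19302 tap19303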
+
+# vm_destroy - stop and erase a virtual machine.
+vm_destroy () {
+ vm="${1}"
+ vmctl.sh destroy "${vm}" "zroot/tests/pf"
+}
+
+# vm_ether - get Ethernet address of interface of virtual machine.
+vm_ether () {
+ vm="${1}"
+ iface="${2}"
+ ssh_cmd_vm="$(ssh_cmd "${vm}")" || return 1
+ ether_pattern="\
+[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]:\
+[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]"
+ ${ssh_cmd_vm} ifconfig "${iface}" |
+ grep -i 'ether' | grep -io "${ether_pattern}"
+}
+
+# upload_file - Upload file to virtual machine.
+upload_file () {
+ vm="${1}"
+ file="${2}"
+ filename="${3}"
+ [ -z "${filename}" ] && filename="${file}"
+ (
+ cat "$(atf_get_srcdir)/files/${file}" |
+ $(ssh_cmd "${vm}") "cat > /root/${filename}"
+ ) || atf_fail "upload_file(): Could not upload ${file} as ${filename}"
+}
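+# Example (as used by the tests): upload scrub_forward.py to the VM
+# under the name test.py:
+#   upload_file client "scrub_forward.py" "test.py"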
Index: tests/sys/netpfil/pf/files/scrub6.py
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/scrub6.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python2
+
+import scapy.all as sp
+import scapy.layers.pflog
+
+import itertools as it
+import multiprocessing as mp
+import pickle, random, sys, time
+
+import conf, util
+
+# Data persistent in order to be able to test result later.
+try:
+ data = pickle.load(open('test.pickle'))
+except IOError:
+ data = {
+ 'raw_500': ('abcdefghijklmnopqrstuvwxyz' * 22)[random.randrange(26):][:500],
+ 'id_rand': random.randrange(1 << 16),
+ 'seq_rand': random.randrange(1 << 16)
+ }
+ f = open('test.pickle', 'w')
+ pickle.dump(data, f)
+ f.close()
+
+raw_500, id_rand, seq_rand = data['raw_500'], data['id_rand'], data['seq_rand']
+
+ether1 = sp.Ether(src=conf.LOCAL_MAC_1, dst=conf.REMOTE_MAC_1)
+ether2 = sp.Ether(src=conf.LOCAL_MAC_2, dst=conf.REMOTE_MAC_2)
+ip1 = sp.IPv6(src=conf.LOCAL_ADDR6_1, dst=conf.LOCAL_ADDR6_3)
+ip2 = sp.IPv6(src=conf.LOCAL_ADDR6_2, dst=conf.LOCAL_ADDR6_3)
+icmp = sp.ICMPv6EchoRequest(id=id_rand, seq=seq_rand, data=raw_500)
+
+p1 = ether1 / ip1 / icmp
+p2 = ether2 / ip2 / icmp
+tofrag1 = ether1 / ip1 / sp.IPv6ExtHdrFragment() / icmp
+tofrag2 = ether2 / ip2 / sp.IPv6ExtHdrFragment() / icmp
+
+def sendonly():
+ time.sleep(1)
+ sp.sendp(sp.fragment6(tofrag1, 400), iface=conf.LOCAL_IF_1, verbose=False)
+ sp.sendp(sp.fragment6(tofrag2, 400), iface=conf.LOCAL_IF_2, verbose=False)
+
+def testresult():
+ success1, success2 = False, False
+
+ defr = util.Defragmenter6()
+ pp1, pp2 = p1.payload, p2.payload # IPv6 layer
+
+ sniffed = sp.sniff(offline='pflog.pcap')
+
+ for p in sniffed:
+ pp_nfrag = defr.more(p)
+ if pp_nfrag is None:
+ print 'CONTINUE'
+ continue
+ pp, nfrag = pp_nfrag
+ print 'SHOW'
+ pp.show()
+
+ # At this point, pp is a packet that has been reassembled from
+ # sniffed packets. We can use nfrag to check how many sniffed
+ # packets it was reassembled from.
+
+ # Success for interface 1 if packet received in 1 fragment,
+ # i.e. scrub active on remote side.
+ success1 = success1 or (nfrag == 1 and
+ (pp.src, pp.dst) == (pp1.src, pp1.dst) and
+ str(pp.payload) == str(pp1.payload))
+
+ # Success for interface 2 if packet received in 2 fragments,
+ # i.e. no scrub on remote side.
+ success2 = success2 or (nfrag == 2 and
+ (pp.src, pp.dst) == (pp2.src, pp2.dst) and
+ str(pp.payload) == str(pp2.payload))
+
+ if not (success1 and success2):
+ exit(1)
+
+if len(sys.argv) < 2:
+ exit('%s: No command given.' % sys.argv[0])
+
+if sys.argv[1] == 'sendonly':
+ sendonly()
+ exit()
+elif sys.argv[1] == 'testresult':
+ testresult()
+ exit()
+else:
+ exit('%s: Bad command: %s.' % (sys.argv[0], repr(sys.argv[1])))
+
+# Following sniff-and-reassembly code kept for future usage.
+
+sender = mp.Process(target=sendonly)
+sender.start()
+
+sniffed = sp.sniff(iface=conf.LOCAL_IF_3, timeout=10)
+
+sender.join()
+
+for i, p in it.izip(it.count(), sniffed):
+ show = []
+ while type(p) != sp.NoPayload:
+ if type(p) == sp.IPv6:
+ show.append(('IPv6', p.src, p.dst))
+ elif type(p) == sp.IPv6ExtHdrFragment:
+ show.append(('Fragment', p.id, p.offset, p.m))
+ elif type(p) == sp.ICMPv6EchoRequest:
+ show.append(('Echo-Request', p.data))
+ elif type(p) == sp.Raw:
+ show.append(('Raw', p.load))
+ p = p.payload
+ print 'Packet', i, ':', show
+
+success1, success2 = False, False
+
+defr = util.Defragmenter6()
+pp1, pp2 = p1.payload, p2.payload # IPv6 layer
+for p in sniffed:
+ pp_nfrag = defr.more(p)
+ if pp_nfrag is None:
+ continue
+ pp, nfrag = pp_nfrag
+
+ # At this point, pp is a packet that has been reassembled from
+ # sniffed packets. We can use nfrag to check how many sniffed
+ # packets it was reassembled from.
+
+ # Success for interface 1 if packet received in 1 fragment,
+ # i.e. scrub active on remote side.
+ success1 = success1 or (nfrag == 1 and
+ (pp.src, pp.dst) == (pp1.src, pp1.dst) and
+ str(pp.payload) == str(pp1.payload))
+
+ # Success for interface 2 if packet received in 2 fragments,
+ # i.e. no scrub on remote side.
+ success2 = success2 or (nfrag == 2 and
+ (pp.src, pp.dst) == (pp2.src, pp2.dst) and
+ str(pp.payload) == str(pp2.payload))
+
+if not (success1 and success2):
+ exit(1)
Index: tests/sys/netpfil/pf/files/scrub_forward.py
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/scrub_forward.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python2
+
+import multiprocessing as mp
+import scapy.all as sp
+import conf
+import time
+import random
+import itertools as it
+
+import util
+
+raw_500 = ('abcdefghijklmnopqrstuvwxyz' * 22)[random.randrange(26):][:500]
+
+ether1 = sp.Ether(src=conf.LOCAL_MAC_1, dst=conf.REMOTE_MAC_1)
+ether2 = sp.Ether(src=conf.LOCAL_MAC_2, dst=conf.REMOTE_MAC_2)
+ip1 = sp.IP(src=conf.LOCAL_ADDR_1,
+ dst=conf.LOCAL_ADDR_3, id=random.randrange(1 << 16))
+ip2 = sp.IP(src=conf.LOCAL_ADDR_2,
+ dst=conf.LOCAL_ADDR_3, id=random.randrange(1 << 16))
+icmp = sp.ICMP(type='echo-request',
+ id=random.randrange(1 << 16), seq=random.randrange(1 << 16))
+
+p1 = ether1 / ip1 / icmp / raw_500
+p2 = ether2 / ip2 / icmp / raw_500
+
+def sendpackets():
+ time.sleep(1)
+ sp.sendp(sp.fragment(p1, 300), iface=conf.LOCAL_IF_1, verbose=False)
+ sp.sendp(sp.fragment(p2, 300), iface=conf.LOCAL_IF_2, verbose=False)
+
+sender = mp.Process(target=sendpackets)
+sender.start()
+
+sniffed = sp.sniff(iface=conf.LOCAL_IF_3, timeout=5)
+
+sender.join()
+
+# for i, p in it.izip(it.count(), sniffed):
+# print '==== Packet', i, '===='
+# p.show()
+# print
+
+success1, success2 = False, False
+
+defr = util.Defragmenter()
+pp1, pp2 = p1.payload, p2.payload # IP layer
+k1, k2 = util.pkey(pp1), util.pkey(pp2)
+for p in sniffed:
+ pp = defr.more(p)
+ if pp is None:
+ continue
+ k = util.pkey(pp)
+
+ # Success for interface 1 if packet received in 1 fragment,
+ # i.e. scrub active on remote side.
+ if not success1:
+ # print 'success1 == False'
+ success1 = (k == k1 and defr.stats[k] == 1 and
+ str(pp.payload) == str(pp1.payload))
+ # print 'success1 ==', success1
+
+ # Success for interface 2 if packet received in 2 fragments,
+ # i.e. no scrub on remote side.
+ if not success2:
+ # print 'success2 == False'
+ success2 = (k == k2 and defr.stats[k] == 2 and
+ str(pp.payload) == str(pp2.payload))
+ # print 'success2 ==', success2
+
+# print 'success1 ==', success1
+# print 'success2 ==', success2
+
+if not (success1 and success2):
+ exit(1)
Index: tests/sys/netpfil/pf/files/scrub_pflog.py
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/scrub_pflog.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python2
+
+import multiprocessing as mp
+import scapy.layers.pflog
+import scapy.all as sp
+import conf
+import time
+import random
+import itertools as it
+
+import util
+
+raw_500 = ('abcdefghijklmnopqrstuvwxyz' * 22)[random.randrange(26):][:500]
+
+ether1 = sp.Ether(src=conf.PAIR_0_MAC_A, dst=conf.PAIR_0_MAC_B)
+ether2 = sp.Ether(src=conf.PAIR_1_MAC_A, dst=conf.PAIR_1_MAC_B)
+ip1 = sp.IP(src=conf.PAIR_0_ADDR_A,
+ dst=conf.PAIR_0_ADDR_B, id=random.randrange(1 << 16))
+ip2 = sp.IP(src=conf.PAIR_1_ADDR_A,
+ dst=conf.PAIR_1_ADDR_B, id=random.randrange(1 << 16))
+icmp = sp.ICMP(type='echo-request',
+ id=random.randrange(1 << 16), seq=random.randrange(1 << 16))
+
+p1 = ether1 / ip1 / icmp / raw_500
+p2 = ether2 / ip2 / icmp / raw_500
+
+def sendpackets():
+ time.sleep(1)
+ sp.sendp(sp.fragment(p1, 300), iface=conf.PAIR_0_IF_A, verbose=False)
+ sp.sendp(sp.fragment(p2, 300), iface=conf.PAIR_1_IF_A, verbose=False)
+
+sender = mp.Process(target=sendpackets)
+sender.start()
+
+sniffed = sp.sniff(iface=conf.PFLOG_IF, timeout=5)
+#sniffed = sp.sniff(iface=conf.PAIR_1_IF_B, timeout=5)
+
+sender.join()
+
+for i, p in it.izip(it.count(), sniffed):
+ if True: #sp.IP in p:
+ print '==== Packet', i, '===='
+ p.show()
+ print
+
+success1, success2 = False, False
+
+defr = util.Defragmenter()
+pp1, pp2 = p1.payload, p2.payload # IP layer
+k1, k2 = util.pkey(pp1), util.pkey(pp2)
+for p in sniffed:
+ pp = defr.more(p)
+ if pp is None:
+ continue
+ k = util.pkey(pp)
+
+ # Success for interface 1 if packet received in 1 fragment,
+ # i.e. scrub active on remote side.
+ success1 = success1 or (k == k1 and defr.stats[k] == 1 and
+ str(pp.payload) == str(pp1.payload))
+
+ # Success for interface 2 if packet received in 2 fragments,
+ # i.e. no scrub on remote side.
+ success2 = success2 or (k == k2 and defr.stats[k] == 2 and
+ str(pp.payload) == str(pp2.payload))
+
+if not (success1 and success2):
+ exit(1)
Index: tests/sys/netpfil/pf/files/util.py
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/util.py
@@ -0,0 +1,138 @@
+# python2
+
+import scapy.all as sp
+
+def pkey(packet):
+ '''Packet key.'''
+ return (packet.src, packet.dst, packet.proto, packet.id)
+
+class Defragmenter(object):
+ def __init__(self):
+ self.frags = dict()
+ self.stats = dict()
+ def more(self, packet):
+ '''Add fragmented packet, return whole packet if complete.'''
+
+ # Find IP layer.
+ p = packet
+ while type(p) != sp.NoPayload:
+ if type(p) == sp.IP:
+ break
+ p = p.payload
+ else:
+ return
+
+ # # Return directly if not fragmented.
+ # if not ((p.flags & 1) or p.frag): # & 1 for MF
+ # return p
+
+ # Add fragment to its packet group.
+ key, val = pkey(p), (p.frag, p)
+ if key in self.frags:
+ self.frags[key].append(val)
+ self.stats[key] += 1
+ else:
+ self.frags[key] = [val]
+ self.stats[key] = 1
+ frag = self.frags[key]
+ frag.sort()
+
+ # Now all fragments in the group are sorted,
+ # go through them and connect them.
+ i = 0
+ while i + 1 < len(frag):
+ f1, p1 = frag[i]
+ f2, p2 = frag[i + 1]
+ len1, len2 = len(p1.payload), len(p2.payload)
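+            # IPv4 fragment offsets are in units of 8 bytes, so p2 is
+            # the fragment immediately following p1 exactly when p1's
+            # payload covers (f2 - f1) * 8 bytes.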
+ if len1 == (f2 - f1) * 8:
+ header1 = sp.IP(tos=p1.tos, flags=p1.flags, ttl=p1.ttl,
+ src=p1.src, dst=p1.dst,
+ proto=p1.proto, id=p1.id)
+ # Now copy MF flag from p2.
+ header1.flags = (header1.flags & ~1) | (p2.flags & 1)
+ # Step 1/2: important for correct length field.
+ p = header1 / (str(p1.payload) + str(p2.payload))
+ # Step 2/2: important to recreate all layers.
+ p = sp.IP(str(p))
+ frag[i:i + 2] = [(f1, p)]
+ else:
+ i += 1
+
+ # Return packet if complete.
+ p = frag[0][1]
+ isfirst, islast = (not p.frag), (not (p.flags & 1))
+ if len(frag) == 1 and isfirst and islast:
+ del self.frags[key]
+ return p
+
+def pkey6(packet):
+ '''Packet key.'''
+ id = packet[sp.IPv6ExtHdrFragment].id
+ return (packet.src, packet.dst, id)
+
+class Defragmenter6(object):
+ def __init__(self):
+ self.frags = dict()
+ self.stats = dict()
+ def more(self, packet):
+ '''Add fragmented packet, return whole packet if complete.
+
+ Returns None on no reassembly, or (p, n), where:
+ p is the defragmented packet ;
+ n is the number of original fragments.'''
+
+ # Find IPv6 layer.
+ p = packet
+ while type(p) != sp.NoPayload:
+ if type(p) == sp.IPv6:
+ break
+ p = p.payload
+ else:
+ return
+
+ # Return directly if not fragmented.
+ if type(p.payload) != sp.IPv6ExtHdrFragment:
+ return (p, 1)
+
+ # Add fragment to its packet group.
+ key, val = pkey6(p), (p.payload.offset, p)
+ if key in self.frags:
+ self.frags[key].append(val)
+ self.stats[key] += 1
+ else:
+ self.frags[key] = [val]
+ self.stats[key] = 1
+ frag = self.frags[key]
+ frag.sort()
+
+ # Now all fragments in the group are sorted,
+ # go through them and connect them.
+ i = 0
+ while i + 1 < len(frag):
+ f1, p1 = frag[i]
+ f2, p2 = frag[i + 1]
+ pfrag1, pfrag2 = p1.payload, p2.payload
+ len1, len2 = len(pfrag1.payload), len(pfrag2.payload)
+ if len1 == (f2 - f1) * 8:
+ header = sp.IPv6(tc=p1.tc, fl=p1.fl, hlim=p1.hlim,
+ src=p1.src, dst=p1.dst)
+ headerfrag = sp.IPv6ExtHdrFragment(nh=pfrag1.nh, offset=f1,
+ res1=pfrag1.res1,
+ res2=pfrag1.res2,
+ id=pfrag1.id, m=pfrag2.m)
+ p = (header / headerfrag /
+ (str(pfrag1.payload) + str(pfrag2.payload)))
+ frag[i:i + 2] = [(f1, p)]
+ else:
+ i += 1
+
+ # Return packet if complete.
+ p = frag[0][1]
+ pfrag = p.payload
+ isfirst, islast = (not pfrag.offset), (not pfrag.m)
+ if len(frag) == 1 and isfirst and islast:
+ del self.frags[key]
+ header = sp.IPv6(tc=p.tc, fl=p.fl, hlim=p.hlim, nh=pfrag.nh,
+ src=p.src, dst=p.dst)
+ payload = str(pfrag.payload)
+ return (header / payload, self.stats[key])
Index: tests/sys/netpfil/pf/files/vmctl.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/vmctl.sh
@@ -0,0 +1,216 @@
+#! /bin/sh
+
+# vmctl.sh - control a VM for tests.
+#
+# vmctl.sh runs all necessary zfs commands, only receiving the
+# directory name from the caller. All network configuration visible
+# to the VM is received through the vmctl.${vm}.rcappend file. The
+# first interface specified in the ${ifs} list is the one for which
+# SSH is setup.
+#
+# Exit status:
+# - 0 on success.
+# - 1 on error other than VM not starting.
+# - 2 on VM not starting.
+
+name="vmctl.sh"
+
+debug () {
+ echo "DEBUG: vmctl: (vm=$vm) $@" >&2
+}
+
+error () {
+ echo "${name}: ${1}" >&2
+}
+
+error_exit () {
+ error "${1}"
+ exit 1
+}
+
+#debug "command line: $@"
+
+cmd="${1}"
+vm="${2}"
+zdir="${3}"
+console="${4}"
+shift 4
+ifs="$@"
+
+usage="\
+Usage: ${0} \"create\" {vm} {zdir} {console} {if1 if2 ...}
+ ${0} \"destroy\" {vm} {zdir}"
+
+baseimg="${zdir}/baseimg"
+snap="${zdir}/baseimg@${vm}"
+vmimg="${zdir}/vm.${vm}"
+mountdir="/mnt/tests/pf/vm.${vm}"
+
+# Make sure baseimg exists as a dataset.
+check_baseimg () {
+ # Return with success immediately if mountpoint (and, by
+ # extension, the dataset) exists and contains the image file.
+ zmountbase="$(zfs get -H -o value mountpoint ${baseimg})" &&
+ [ -e "${zmountbase}/img" ] && return
+ error "Cannot find base image, have you run make_baseimg.sh?"
+ return 1
+ #zfs create -p "${baseimg}" || return 1
+ #zmountbase="$(zfs get -H -o value mountpoint ${baseimg})" || return 1
+ # Download image file.
+ # fetch -o "${imgfile}.xz" \
+ # "https://download.freebsd.org/ftp/releases/VM-IMAGES/11.0-RELEASE/amd64/Latest/FreeBSD-11.0-RELEASE-amd64.raw.xz" \
+ # || return 1
+ # cp -ai "/var/tmp/FreeBSD-11.0-RELEASE-amd64.raw.xz" \
+ # "${zmountbase}/img.xz" || return 1
+ # cp -ai "/usr/obj/usr/home/paggas/paggas.freebsd/release/vm-cccc.raw" \
+ # "${zmountbase}/img" || return 1
+}
+
+# # Install system on VM.
+# make_install () {
+# # Copy pf binary files from host to VM. Quick fix while we use
+# # official images, will do proper system installs in the future.
+# cp -a "/boot/kernel/pf.ko" \
+# "${mountdir}/boot/kernel/pf.ko" || return 1
+# cp -a "/sbin/pfctl" \
+# "${mountdir}/sbin/pfctl" || return 1
+# }
+
+write_sshlogin () {
+ addr="$(grep -E "ifconfig_.*inet.*[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+" \
+ "vmctl.${vm}.rcappend" |
+ sed -E "s/.*[^0-9]([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+).*/\1/" |
+ head -n 1)" &&
+ [ "x${addr}" '!=' "x" ] || (
+ error "${0}: write_sshlogin: no IPv4 address found."
+ return 1
+ ) || return 1
+ echo "root@${addr}" > "vmctl.${vm}.sshlogin" || {
+ error "Cannot write SSH login file."
+ return 1
+ }
+}
+
+#debug 'begin'
+case "${cmd}" in
+ (create)
+ check_baseimg || exit 1
+ zfs snap "${snap}" ||
+ error_exit "Cannot create ZFS snapshot ${snap}."
+ zfs clone "${snap}" "${vmimg}" ||
+ error_exit "Cannot clone ZFS snapshot ${snap} to ${vmimg}."
+		ssh-keygen -q -P '' -f "vmctl.${vm}.id_rsa" ||
+		    error_exit "Cannot create SSH identity file."
+		write_sshlogin ||
+		    error_exit "Cannot write SSH login file."
+ mkdir -p "${mountdir}" ||
+ error_exit "Cannot create mountpoint ${mountdir}."
+		zmountvm="$(zfs get -H -o value mountpoint ${vmimg})" ||
+		{
+			error "Cannot get mountpoint of dataset ${vmimg}!"
+			exit 1
+		}
+ md="$(mdconfig ${zmountvm}/img)" ||
+ error_exit "Cannot create memory disk for ${zmountvm}/img."
+ (
+ mount "/dev/${md}p3" "${mountdir}" ||
+ {
+ error "Cannot mount /dev/${md}p3 on ${mountdir}, \
+image file malformed?"
+ return 1
+ }
+ (
+ #make_install || return 1
+ (
+ umask 077 ||
+ {
+ error "Cannot change umask!"
+ return 1
+ }
+ mkdir -p "${mountdir}/root/.ssh" ||
+ {
+ error "Cannot create ${mountdir}/root/.ssh!"
+ return 1
+ }
+ cat "vmctl.${vm}.id_rsa.pub" >> \
+ "${mountdir}/root/.ssh/authorized_keys"
+ ) ||
+ {
+ error "Cannot write \
+${mountdir}/root/.ssh/authorized_keys!"
+ return 1
+ }
+ (
+ echo "PermitRootLogin without-password" ;
+ echo "StrictModes no" ;
+ ) >> "${mountdir}/etc/ssh/sshd_config" ||
+ {
+ error "Cannot write ${mountdir}/etc/ssh/sshd_config!"
+ return 1
+ }
+ echo "sshd_enable=\"YES\"" >> \
+ "${mountdir}/etc/rc.conf" ||
+ {
+ error "Cannot write ${mountdir}/etc/rc.conf!"
+ return 1
+ }
+ cat "vmctl.${vm}.rcappend" >> \
+ "${mountdir}/etc/rc.conf" ||
+ {
+ error "Cannot write ${mountdir}/etc/rc.conf!"
+ return 1
+ }
+ # Test
+ # echo "ifconfig vtnet0 ether 02:00:00:00:00:01" >> \
+ # "${mountdir}/etc/start_if.vtnet0" || return 1
+ # echo "ifconfig vtnet1 ether 02:00:00:00:00:02" >> \
+ # "${mountdir}/etc/start_if.vtnet1" || return 1
+ #debug 'all append good'
+		)
+		appendstatus="$?"
+		#debug "appendstatus in: ${appendstatus}"
+		umount "${mountdir}"
+		exit "${appendstatus}"
+	)
+	appendstatus="$?"
+	mdconfig -du "${md}"
+	rmdir "${mountdir}"
+	#debug "appendstatus out: ${appendstatus}"
+	[ "x${appendstatus}" = 'x0' ] || exit 1
+ (
+ ifsopt=''
+ for i in ${ifs} ; do
+ ifsopt="${ifsopt} -t ${i}" ; done
+ #debug "ifsopt: ${ifsopt}"
+ daemon -p "vmctl.${vm}.pid" \
+ sh /usr/share/examples/bhyve/vmrun.sh ${ifsopt} \
+ -d "${zmountvm}/img" -C "${console}" \
+ "tests-pf-${vm}"
+ #sleep 5 # Debug only.
+ sleep 2
+ # Check if bhyve is running, otherwise it has probably
+ # failed to start.
+ [ -e "vmctl.${vm}.pid" ] || exit 2
+ #ls -la '/dev/vmm' >&2
+ )
+ ;;
+ (destroy)
+ bhyvectl --destroy --vm="tests-pf-${vm}" >&2
+ [ -e "vmctl.${vm}.pid" ] && kill "$(cat vmctl.${vm}.pid)"
+ rm "vmctl.${vm}.id_rsa" \
+ "vmctl.${vm}.id_rsa.pub" \
+ "vmctl.${vm}.sshlogin"
+ # Sleep a bit before destroying dataset, so that it doesn't
+ # show up as "busy".
+ sleep 5
+ zfs destroy -R "${snap}"
+ ;;
+ (*)
+ echo "${usage}" >&2
+ exit 1
+ ;;
+esac
+
+status="$?"
+#debug "status: ${status}"
+exit "${status}"
Index: tests/sys/netpfil/pf/files/vmctl.sh.zvol
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/vmctl.sh.zvol
@@ -0,0 +1,127 @@
+#! /bin/sh
+
+# vmctl.sh - control a VM for tests.
+#
+# vmctl.sh runs all necessary zfs commands, only receiving the
+# directory name from the caller. All network configuration visible
+# to the VM is received through the vmctl.${vm}.rcappend file. The
+# first interface specified in the ${ifs} list is the one for which
+# SSH is setup.
+
+cmd="${1}"
+vm="${2}"
+zdir="${3}"
+console="${4}"
+shift 4
+ifs="$@"
+
+usage="\
+Usage: ${0} \"create\" {vm} {zdir} {console} {if1 if2 ...}
+ ${0} \"destroy\" {vm} {zdir}"
+
+baseimg="${zdir}/baseimg"
+snap="${zdir}/baseimg@${vm}"
+vmimg="${zdir}/vm.${vm}"
+mountdir="/mnt/tests/pf/vm.${vm}"
+
+# Make sure baseimg exists as a zvol.
+make_baseimg () {
+ [ -e "/dev/zvol/${baseimg}" ] && return
+ tempdir="$(mktemp -d)"
+ (
+ # Download image file.
+ imgfile="${tempdir}/FreeBSD-11.0-RELEASE-amd64.raw"
+ # fetch -o "${imgfile}.xz" \
+ # "https://download.freebsd.org/ftp/releases/VM-IMAGES/11.0-RELEASE/amd64/Latest/FreeBSD-11.0-RELEASE-amd64.raw.xz" \
+ # || return 1
+ # TODO Use local copy of above for now.
+ cp -ai "/var/tmp/FreeBSD-11.0-RELEASE-amd64.raw.xz" \
+ "${imgfile}.xz" || return 1
+ unxz "${imgfile}.xz" || return 1
+ size="$(stat -f '%z' ${imgfile})"
+ # Round up to multiple of 16M.
+ [ "$(expr ${size} % 16777216)" = 0 ] ||
+ size="$(expr \( \( $size / 16777216 \) + 1 \) \* 16777216)"
+ # Copy image file to zvol.
+ zfs create -p -V "${size}" "${baseimg}" || return 1
+ dd bs=16M if="${imgfile}" of="/dev/zvol/${baseimg}" || return 1
+ )
+ status="$?"
+ rm -r "${tempdir}"
+ return "${status}"
+}
+
+# Install system on VM.
+make_install () {
+ # TODO Copy pf binary files from host to VM. Quick fix while we
+ # use official images, will do proper system installs in the
+ # future.
+ cp -a "/boot/kernel/pf.ko" \
+ "${mountdir}/boot/kernel/pf.ko" || return 1
+ cp -a "/sbin/pfctl" \
+ "${mountdir}/sbin/pfctl" || return 1
+}
+
+write_sshlogin () {
+ addr="$(grep -E "ifconfig_.*inet.*[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+" \
+ "vmctl.${vm}.rcappend" |
+ sed -E "s/.*[^0-9]([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+).*/\1/" |
+ head -n 1)" || return 1
+ [ "x${addr}" '!=' "x" ] || return 1
+ echo "root@${addr}" > "vmctl.${vm}.sshlogin" || return 1
+}
+
+case "${cmd}" in
+ (create)
+ make_baseimg || exit 1
+ zfs snap "${snap}" || exit 1
+ zfs clone "${snap}" "${vmimg}" || exit 1
+ ssh-keygen -q -P '' -f "vmctl.${vm}.id_rsa" || exit 1
+ write_sshlogin || exit 1
+ mkdir -p "${mountdir}" || exit 1
+ mount "/dev/zvol/${vmimg}p3" "${mountdir}" || exit 1
+ (
+ make_install || return 1
+ (
+			umask 077 || return 1
+ mkdir -p "${mountdir}/root/.ssh" || return 1
+			cat "vmctl.${vm}.id_rsa.pub" >> \
+ "${mountdir}/root/.ssh/authorized_keys"
+ ) || return 1
+ echo "PermitRootLogin without-password" >> \
+ "${mountdir}/etc/ssh/sshd_config" || return 1
+ echo "sshd_enable=\"YES\"" >> \
+ "${mountdir}/etc/rc.conf" || return 1
+ cat "vmctl.${vm}.rcappend" >> \
+ "${mountdir}/etc/rc.conf" || return 1
+ )
+ appendstatus="$?"
+ umount "${mountdir}"
+ rmdir "${mountdir}"
+	[ "x${appendstatus}" = 'x0' ] || exit 1
+ (
+ ifsopt=''
+ for i in ${ifs} ; do
+ ifsopt="${ifsopt} -t ${i}" ; done
+ daemon -p "vmctl.${vm}.pid" \
+ sh /usr/share/examples/bhyve/vmrun.sh ${ifsopt} \
+ -d "/dev/zvol/${vmimg}" -C "${console}" \
+ "tests-pf-${vm}"
+ )
+ ;;
+ (destroy)
+ bhyvectl --destroy --vm="tests-pf-${vm}"
+ [ -e "vmctl.${vm}.pid" ] && kill "$(cat vmctl.${vm}.pid)"
+ rm "vmctl.${vm}.id_rsa" \
+ "vmctl.${vm}.id_rsa.pub" \
+ "vmctl.${vm}.sshlogin"
+ # TODO Sleep a bit before destroying dataset, so that it
+ # doesn't show up as "busy".
+ sleep 5
+ zfs destroy -R "${snap}"
+ ;;
+ (*)
+ echo "${usage}" >&2
+ exit 1
+ ;;
+esac
Index: tests/sys/netpfil/pf/pf_test_block_drop.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/pf_test_block_drop.sh
@@ -0,0 +1,69 @@
+# Starts two virtual machines, the client and the server. Starts two
+# instances of nc on the server, listening on two different ports, of
+# which one port is blocked-with-drop by pf. The client then tries
+# to connect to the two instances. The test succeeds if one
+# connection succeeds but the other one fails.
+#
+# This test is almost the same as block_return, with the difference
+# that filtered packets are silently dropped instead of answered with
+# a returned packet (ICMP unreachable or TCP RST).
+
+. "$(atf_get_srcdir)/files/pf_test_util.sh"
+
+atf_init_test_cases () {
+ atf_add_test_case "block_drop"
+}
+
+atf_test_case "block_drop" cleanup
+block_drop_head () {
+ atf_set descr 'Block-with-drop a port and test that it is blocked.'
+ atf_set "require.user" "root"
+}
+block_drop_body () {
+ block_port="50000"
+ pass_port="50001"
+ rules="block drop in on vtnet1 proto tcp to port ${block_port}"
+ # Set up networking.
+ tap_create client tap19302 10.135.213.1/28 vtnet0 10.135.213.2/28
+ tap_create client tap19303 10.135.213.33/28 vtnet1 10.135.213.35/28
+ tap_create server tap19304 10.135.213.17/28 vtnet0 10.135.213.18/28
+ tap_create server tap19305 10.135.213.34/28 vtnet1 10.135.213.36/28
+ bridge_create bridge6555 tap19303 tap19305
+ # Start VMs.
+ vm_create client tap19302 tap19303
+ vm_create server tap19304 tap19305
+ # Debug
+ #atf_check sleep 900
+	# Wait for VMs to start up and for their SSH daemons to start
+ # listening.
+ atf_check sleep 60
+ # Start pf.
+ atf_check $(ssh_cmd server) "kldload -n pf"
+ echo "${rules}" | atf_check -e ignore $(ssh_cmd server) "pfctl -ef -"
+ # Start test.
+ atf_check daemon -p nc.block.pid $(ssh_cmd server) "nc -l ${block_port}"
+ atf_check daemon -p nc.pass.pid $(ssh_cmd server) "nc -l ${pass_port}"
+ remote_addr_1="10.135.213.36"
+ atf_check -s exit:1 -e empty $(ssh_cmd client) \
+ "nc -z -w 4 ${remote_addr_1} ${block_port}"
+ atf_check -s exit:0 -e ignore $(ssh_cmd client) \
+ "nc -z ${remote_addr_1} ${pass_port}"
+}
+block_drop_cleanup () {
+ # Stop test.
+ [ -e nc.block.pid ] && kill "$(cat nc.block.pid)"
+ [ -e nc.pass.pid ] && kill "$(cat nc.pass.pid)"
+ # # Stop pf.
+ # $(ssh_cmd server) "pfctl -dFa ;
+ # kldunload -n pf ;
+ # true"
+ # Stop VMs.
+ vm_destroy client
+ vm_destroy server
+ # Tear down networking.
+ ifconfig bridge6555 destroy
+ ifconfig tap19302 destroy
+ ifconfig tap19303 destroy
+ ifconfig tap19304 destroy
+ ifconfig tap19305 destroy
+}
Index: tests/sys/netpfil/pf/pf_test_block_return.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/pf_test_block_return.sh
@@ -0,0 +1,65 @@
+# Starts two virtual machines, the client and the server. Starts two
+# instances of nc on the server, listening on two different ports, of
+# which one port is blocked-with-return by pf. The client then tries
+# to connect to the two instances. The test succeeds if one
+# connection succeeds but the other one fails.
+
+. "$(atf_get_srcdir)/files/pf_test_util.sh"
+
+atf_init_test_cases () {
+ atf_add_test_case "block_return"
+}
+
+atf_test_case "block_return" cleanup
+block_return_head () {
+ atf_set descr 'Block-with-return a port and test that it is blocked.'
+ atf_set "require.user" "root"
+}
+block_return_body () {
+ block_port="50000"
+ pass_port="50001"
+ rules="block return in on vtnet1 proto tcp to port ${block_port}"
+ # Set up networking.
+ tap_create client tap19302 10.135.213.1/28 vtnet0 10.135.213.2/28
+ tap_create client tap19303 10.135.213.33/28 vtnet1 10.135.213.35/28
+ tap_create server tap19304 10.135.213.17/28 vtnet0 10.135.213.18/28
+ tap_create server tap19305 10.135.213.34/28 vtnet1 10.135.213.36/28
+ bridge_create bridge6555 tap19303 tap19305
+ # Start VMs.
+ vm_create client tap19302 tap19303
+ vm_create server tap19304 tap19305
+ # Debug
+ #atf_check sleep 900
+	# Wait for VMs to start up and for their SSH daemons to start
+ # listening.
+ atf_check sleep 60
+ # Start pf.
+ atf_check $(ssh_cmd server) "kldload -n pf"
+ echo "${rules}" | atf_check -e ignore $(ssh_cmd server) "pfctl -ef -"
+ # Start test.
+ atf_check daemon -p nc.block.pid $(ssh_cmd server) "nc -l ${block_port}"
+ atf_check daemon -p nc.pass.pid $(ssh_cmd server) "nc -l ${pass_port}"
+ remote_addr_1="10.135.213.36"
+ atf_check -s exit:1 -e empty $(ssh_cmd client) \
+ "nc -z ${remote_addr_1} ${block_port}"
+ atf_check -s exit:0 -e ignore $(ssh_cmd client) \
+ "nc -z ${remote_addr_1} ${pass_port}"
+}
+block_return_cleanup () {
+ # Stop test.
+ [ -e nc.block.pid ] && kill "$(cat nc.block.pid)"
+ [ -e nc.pass.pid ] && kill "$(cat nc.pass.pid)"
+ # # Stop pf.
+ # $(ssh_cmd server) "pfctl -dFa ;
+ # kldunload -n pf ;
+ # true"
+ # Stop VMs.
+ vm_destroy client
+ vm_destroy server
+ # Tear down networking.
+ ifconfig bridge6555 destroy
+ ifconfig tap19302 destroy
+ ifconfig tap19303 destroy
+ ifconfig tap19304 destroy
+ ifconfig tap19305 destroy
+}
Index: tests/sys/netpfil/pf/pf_test_scrub_forward.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/pf_test_scrub_forward.sh
@@ -0,0 +1,99 @@
+# This test starts two virtual machines, the client and the server.
+# It uses scapy to send IPv4 fragmented traffic from the client
+# machine to the server machine. The machines are connected via three
+# interfaces. The client sends traffic to the server via the first
+# two interfaces with the client itself as the destination, which the
+# server forwards via the third interface back to the client. Scrub
+# is activated on the first but not the second interface on the server
+# pf. By examining the forwarded packets as received on the client,
+# we can verify that reassembly occurs on one but not the other
+# interface.
+
+. "$(atf_get_srcdir)/files/pf_test_util.sh"
+
+atf_init_test_cases () {
+ atf_add_test_case "scrub_forward"
+}
+
+atf_test_case "scrub_forward" cleanup
+scrub_forward_head () {
+ atf_set descr 'Scrub defrag with forward on one \
+of two interfaces and test difference.'
+ atf_set "require.user" "root"
+}
+scrub_forward_body () {
+ rules="scrub in on vtnet1 all fragment reassemble
+ pass log (all to pflog0) on { vtnet1 vtnet2 }"
+ # Set up networking.
+ tap_create client tap19302 10.135.213.1/28 vtnet0 10.135.213.2/28
+ tap_create server tap19303 10.135.213.17/28 vtnet0 10.135.213.18/28
+ tap_create client tap19304 10.135.213.33/28 vtnet1 10.135.213.34/28
+ tap_create server tap19305 10.135.213.35/28 vtnet1 10.135.213.36/28
+ tap_create client tap19306 10.135.213.49/28 vtnet2 10.135.213.50/28
+ tap_create server tap19307 10.135.213.51/28 vtnet2 10.135.213.52/28
+ tap_create client tap19308 10.135.213.65/28 vtnet3 10.135.213.66/28
+ tap_create server tap19309 10.135.213.67/28 vtnet3 10.135.213.68/28
+ bridge_create bridge6555 tap19304 tap19305
+ bridge_create bridge6556 tap19306 tap19307
+ bridge_create bridge6557 tap19308 tap19309
+ # Start VMs.
+ vm_create client tap19302 tap19304 tap19306 tap19308
+ vm_create server tap19303 tap19305 tap19307 tap19309
+	# Wait for VMs to start up and for their SSH daemons to start
+ # listening.
+ atf_check sleep 120
+ # Debug
+ #atf_check sleep 900
+ # Start pf.
+ atf_check $(ssh_cmd server) "kldload -n pf"
+ echo "${rules}" | atf_check -e ignore $(ssh_cmd server) "pfctl -ef -"
+ # Enable forwarding.
+ atf_check -o ignore $(ssh_cmd server) "sysctl net.inet.ip.forwarding=1"
+ # Warm up connections, so that network discovery is complete.
+ atf_check -o ignore $(ssh_cmd client) "ping -c3 10.135.213.36"
+ atf_check -o ignore $(ssh_cmd client) "ping -c3 10.135.213.52"
+ atf_check -o ignore $(ssh_cmd client) "ping -c3 10.135.213.68"
+ # Upload test to VM.
+ upload_file client "scrub_forward.py" "test.py"
+ upload_file client "util.py"
+ (
+ client_ether1="$(vm_ether client vtnet1)" || return 1
+ client_ether2="$(vm_ether client vtnet2)" || return 1
+ server_ether1="$(vm_ether server vtnet1)" || return 1
+ server_ether2="$(vm_ether server vtnet2)" || return 1
+ echo "\
+LOCAL_MAC_1='${client_ether1}'
+LOCAL_MAC_2='${client_ether2}'
+REMOTE_MAC_1='${server_ether1}'
+REMOTE_MAC_2='${server_ether2}'
+LOCAL_ADDR_1='10.135.213.34'
+LOCAL_ADDR_2='10.135.213.50'
+LOCAL_ADDR_3='10.135.213.66'
+REMOTE_ADDR_1='10.135.213.36'
+REMOTE_ADDR_2='10.135.213.52'
+REMOTE_ADDR_3='10.135.213.68'
+LOCAL_IF_1='vtnet1'
+LOCAL_IF_2='vtnet2'
+LOCAL_IF_3='vtnet3'" | \
+ $(ssh_cmd client) "cat > /root/conf.py"
+ ) || atf_fail "Could not upload conf.py to VM."
+ # Run test.
+ atf_check -o ignore $(ssh_cmd client) "cd /root && ${PYTHON2} test.py"
+}
+scrub_forward_cleanup () {
+ # Stop VMs.
+ vm_destroy client
+ vm_destroy server
+ # Tear down networking.
+ ifconfig bridge6555 destroy
+ ifconfig bridge6556 destroy
+ ifconfig bridge6557 destroy
+ ifconfig tap19302 destroy
+ ifconfig tap19303 destroy
+ ifconfig tap19304 destroy
+ ifconfig tap19305 destroy
+ ifconfig tap19306 destroy
+ ifconfig tap19307 destroy
+ ifconfig tap19308 destroy
+ ifconfig tap19309 destroy
+}
Index: tests/sys/netpfil/pf/pf_test_scrub_forward6.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/pf_test_scrub_forward6.sh
@@ -0,0 +1,125 @@
+# This test starts two virtual machines, the client and the server.
+# It uses scapy to send IPv6 fragmented traffic from the client
+# machine to the server machine. The machines are connected via three
+# interfaces. The client sends traffic to the server via the first
+# two interfaces with the client itself as the destination, which the
+# server forwards via the third interface back to the client. Scrub
+# is activated on the first but not the second interface on the server
+# pf. Tcpdump is run on pflog on the server, capturing traffic in a
+# pcap file, which is copied back to the client for examination. By
+# examining the captured packets, we can verify that reassembly occurs
+# on one but not the other interface.
+
+. "$(atf_get_srcdir)/files/pf_test_util.sh"
+
+atf_init_test_cases () {
+ atf_add_test_case "scrub_forward6"
+}
+
+atf_test_case "scrub_forward6" cleanup
+scrub_forward6_head () {
+ atf_set descr 'Scrub defrag with forward on one \
+of two interfaces and test difference, IPv6 version.'
+ atf_set "require.user" "root"
+}
+scrub_forward6_body () {
+ rules="scrub in on vtnet1 all fragment reassemble
+ pass log (all to pflog0) on { vtnet1 vtnet2 }"
+ # Set up networking.
+ # Need at least one IPv4 interface per VM for SSH autoconf.
+ tap_create client tap19302 10.135.213.1/28 vtnet0 10.135.213.2/28
+ tap_create server tap19303 10.135.213.17/28 vtnet0 10.135.213.18/28
+ # tap6_create client tap19302 fd22:27ca:58fe::/64 \
+ # vtnet0 fd22:27ca:58fe::1/64
+ # tap6_create server tap19303 fd22:27ca:58fe:1::/64 \
+ # vtnet0 fd22:27ca:58fe:1::1/64
+ tap6_create client tap19304 fd22:27ca:58fe:2::/64 \
+ vtnet1 fd22:27ca:58fe:2::1/64
+ tap6_create server tap19305 fd22:27ca:58fe:2::2/64 \
+ vtnet1 fd22:27ca:58fe:2::3/64
+ tap6_create client tap19306 fd22:27ca:58fe:3::/64 \
+ vtnet2 fd22:27ca:58fe:3::1/64
+ tap6_create server tap19307 fd22:27ca:58fe:3::2/64 \
+ vtnet2 fd22:27ca:58fe:3::3/64
+ tap6_create client tap19308 fd22:27ca:58fe:4::/64 \
+ vtnet3 fd22:27ca:58fe:4::1/64
+ tap6_create server tap19309 fd22:27ca:58fe:4::2/64 \
+ vtnet3 fd22:27ca:58fe:4::3/64
+ bridge_create bridge6555 tap19304 tap19305
+ bridge_create bridge6556 tap19306 tap19307
+ bridge_create bridge6557 tap19308 tap19309
+ # Start VMs.
+ vm_create client tap19302 tap19304 tap19306 tap19308
+ vm_create server tap19303 tap19305 tap19307 tap19309
+	# Wait for VMs to start up and for their SSH daemons to start
+ # listening.
+ atf_check sleep 120
+ # Debug
+ #atf_check sleep 900
+ # Start pf.
+ atf_check $(ssh_cmd server) "kldload -n pf pflog"
+ echo "${rules}" | atf_check -e ignore $(ssh_cmd server) "pfctl -ef -"
+ # Enable forwarding.
+ atf_check -o ignore $(ssh_cmd server) "sysctl net.inet6.ip6.forwarding=1"
+ # Warm up connections, so that network discovery is complete.
+ atf_check -o ignore $(ssh_cmd client) "ping6 -c3 fd22:27ca:58fe:2::3"
+ atf_check -o ignore $(ssh_cmd client) "ping6 -c3 fd22:27ca:58fe:3::3"
+ atf_check -o ignore $(ssh_cmd client) "ping6 -c3 fd22:27ca:58fe:4::3"
+ # Upload test to VM.
+ upload_file client "scrub6.py" "test.py"
+ upload_file client "util.py"
+ (
+ client_ether1="$(vm_ether client vtnet1)" || return 1
+ client_ether2="$(vm_ether client vtnet2)" || return 1
+ server_ether1="$(vm_ether server vtnet1)" || return 1
+ server_ether2="$(vm_ether server vtnet2)" || return 1
+ echo "\
+LOCAL_MAC_1='${client_ether1}'
+LOCAL_MAC_2='${client_ether2}'
+REMOTE_MAC_1='${server_ether1}'
+REMOTE_MAC_2='${server_ether2}'
+LOCAL_ADDR6_1='fd22:27ca:58fe:2::1'
+LOCAL_ADDR6_2='fd22:27ca:58fe:3::1'
+LOCAL_ADDR6_3='fd22:27ca:58fe:4::1'
+REMOTE_ADDR6_1='fd22:27ca:58fe:2::3'
+REMOTE_ADDR6_2='fd22:27ca:58fe:3::3'
+REMOTE_ADDR6_3='fd22:27ca:58fe:4::3'
+LOCAL_IF_1='vtnet1'
+LOCAL_IF_2='vtnet2'
+LOCAL_IF_3='vtnet3'" | \
+ $(ssh_cmd client) "cat > /root/conf.py"
+ ) || atf_fail "Could not upload conf.py to VM."
+ # Run test.
+ # Run tcpdump for 15 seconds.
+ atf_check daemon -p tcpdump.pid $(ssh_cmd server) \
+ "cd /root && tcpdump -G 15 -W 1 -i pflog0 -w pflog.pcap"
+ atf_check sleep 2
+ atf_check -o ignore $(ssh_cmd client) \
+ "cd /root && ${PYTHON2} test.py sendonly"
+ # Wait for tcpdump to finish.
+ atf_check sleep 15
+ #atf_check kill "$(cat tcpdump.pid)"
+ $(ssh_cmd server) "cat /root/pflog.pcap" > "pflog.pcap" ||
+ atf_fail "Could not download pflog.pcap from VM."
+ $(ssh_cmd client) "cat > /root/pflog.pcap" < "pflog.pcap" ||
+ atf_fail "Could not upload pflog.pcap to VM."
+ atf_check -o ignore $(ssh_cmd client) \
+ "cd /root && ${PYTHON2} test.py testresult"
+}
+scrub_forward6_cleanup () {
+ # Stop VMs.
+ vm_destroy client
+ vm_destroy server
+ # Tear down networking.
+ ifconfig bridge6555 destroy
+ ifconfig bridge6556 destroy
+ ifconfig bridge6557 destroy
+ ifconfig tap19302 destroy
+ ifconfig tap19303 destroy
+ ifconfig tap19304 destroy
+ ifconfig tap19305 destroy
+ ifconfig tap19306 destroy
+ ifconfig tap19307 destroy
+ ifconfig tap19308 destroy
+ ifconfig tap19309 destroy
+}