Page Menu
Home
FreeBSD
Search
Configure Global Search
Log In
Files
F132919790
D11401.id32464.diff
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Flag For Later
Award Token
Size
52 KB
Referenced Files
None
Subscribers
None
D11401.id32464.diff
View Options
Index: etc/mtree/BSD.tests.dist
===================================================================
--- etc/mtree/BSD.tests.dist
+++ etc/mtree/BSD.tests.dist
@@ -470,6 +470,10 @@
..
netinet
..
+ netpfil
+ pf
+ ..
+ ..
opencrypto
..
pjdfstest
Index: targets/pseudo/tests/Makefile.depend
===================================================================
--- targets/pseudo/tests/Makefile.depend
+++ targets/pseudo/tests/Makefile.depend
@@ -234,6 +234,8 @@
tests/sys/mac/portacl \
tests/sys/mqueue \
tests/sys/netinet \
+ tests/sys/netpfil \
+ tests/sys/netpfil/pf \
tests/sys/opencrypto \
tests/sys/pjdfstest/tests \
tests/sys/pjdfstest/tests/chflags \
Index: tests/sys/Makefile
===================================================================
--- tests/sys/Makefile
+++ tests/sys/Makefile
@@ -13,6 +13,7 @@
TESTS_SUBDIRS+= mac
TESTS_SUBDIRS+= mqueue
TESTS_SUBDIRS+= netinet
+TESTS_SUBDIRS+= netpfil
TESTS_SUBDIRS+= opencrypto
TESTS_SUBDIRS+= posixshm
TESTS_SUBDIRS+= sys
Index: tests/sys/netpfil/Kyuafile
===================================================================
--- /dev/null
+++ tests/sys/netpfil/Kyuafile
@@ -0,0 +1,52 @@
+-- $FreeBSD$
+--
+-- Copyright 2011 Google Inc.
+-- All rights reserved.
+--
+-- Redistribution and use in source and binary forms, with or without
+-- modification, are permitted provided that the following conditions are
+-- met:
+--
+-- * Redistributions of source code must retain the above copyright
+-- notice, this list of conditions and the following disclaimer.
+-- * Redistributions in binary form must reproduce the above copyright
+-- notice, this list of conditions and the following disclaimer in the
+-- documentation and/or other materials provided with the distribution.
+-- * Neither the name of Google Inc. nor the names of its contributors
+-- may be used to endorse or promote products derived from this software
+-- without specific prior written permission.
+--
+-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+-- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+-- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+-- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+-- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+-- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+-- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+-- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+-- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+-- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+-- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-- Automatically recurses into any subdirectory that holds a Kyuafile.
+-- As such, this Kyuafile is suitable for installation into the root of
+-- the tests hierarchy as well as into any other subdirectory that needs
+-- "auto-discovery" of tests.
+--
+-- This file is based on the Kyuafile.top sample file distributed in the
+-- kyua-cli package.
+
+syntax(2)
+
+local directory = fs.dirname(current_kyuafile())
+for file in fs.files(directory) do
+ if file == "." or file == ".." then
+ -- Skip these special entries.
+ else
+ local kyuafile_relative = fs.join(file, "Kyuafile")
+ local kyuafile_absolute = fs.join(directory, kyuafile_relative)
+ if fs.exists(kyuafile_absolute) then
+ include(kyuafile_relative)
+ end
+ end
+end
Index: tests/sys/netpfil/Makefile
===================================================================
--- /dev/null
+++ tests/sys/netpfil/Makefile
@@ -0,0 +1,7 @@
+# $FreeBSD$
+
+TESTSDIR= ${TESTSBASE}/sys/netpfil
+TESTS_SUBDIRS+= pf
+KYUAFILE= yes
+
+.include <bsd.test.mk>
Index: tests/sys/netpfil/pf/Makefile
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/Makefile
@@ -0,0 +1,10 @@
+# $FreeBSD$
+
+TESTSDIR= ${TESTSBASE}/sys/netpfil/pf
+BINDIR= ${TESTSDIR}
+
+ATF_TESTS_SH= pf_test
+
+SUBDIR+= files
+
+.include <bsd.test.mk>
Index: tests/sys/netpfil/pf/README
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/README
@@ -0,0 +1,202 @@
+FreeBSD test suite - pf
+=======================
+
+(Current as of 20170828.)
+
+These tests use kyua and ATF. They create and run VMs with bhyve, and
+use these machines to run various test scenarios.
+
+
+Requirements
+------------
+
+The tests require that the host supports bhyve and the following
+modules:
+
+* bridgestp - for bridge networking for inter-virtual machine
+ communication
+* if_bridge - for bridge networking
+* if_tap - for host-to-virtual machine communication
+* nmdm - for virtual serial consoles
+* vmm - for running virtual machines
+* zfs - for creating and cloning virtual machine images
+
+The tests require that a ZFS storage pool exists on the system named
+"zroot", and use the subtree under "zroot/tests/pf" to clone and
+configure VM images to be used with bhyve.
+
+The tests also require an active internet connection, as required
+packages are installed during creation of the virtual machine base
+image, as many of the test scenarios use scapy for traffic generation
+and inspection.
+
+
+Names and numbers used
+----------------------
+
+The following names and numbers have been chosen for use in the tests,
+as a way to avoid collisions with other installed packages on the host
+system:
+
+* Bhyve guests: all virtual machines are named tests-pf-*
+* Bridge interfaces: bridge6555 to bridge6558
+* File system: the mountpoint of ZFS dataset "zroot/tests/pf" is used
+* IPv4 addresses: block 10.135.213.0/24 is used
+* Installation location: /usr/tests/sys/netpfil/pf
+* Tap interfaces: tap19302 to tap19317
+* Virtual consoles: /dev/nmdmtests-pf-*[AB]
+
+
+Required packages
+-----------------
+
+The following packages are automatically installed on the virtual
+machine base image. No packages are installed on the host machine.
+
+* python2.7
+* scapy
+
+
+Installation
+------------
+
+The tests are shipped as part of the FreeBSD source. They are
+installed as part of the FreeBSD test suite, which is installed by
+default when installing FreeBSD from source starting with version 11.
+To install the tests manually:
+
+ % cd {sourcedir} (/usr/src or other)
+ % cd tests/sys/netpfil/pf
+ % make
+ # make install (as root)
+
+The tests should appear under /usr/tests/sys/netpfil/pf. If not,
+creating the directory hierarchy manually might be needed:
+
+ # mkdir -p /usr/tests/sys/netpfil/pf (as root)
+
+The hierarchy is created automatically when the tests are installed as
+part of the complete test suite, which is recommended.
+
+
+First time preparation
+----------------------
+
+Before being able to run any tests, the virtual machine base image
+needs to be created. To do that, as root:
+
+ # /usr/tests/sys/netpfil/pf/files/make_baseimg.sh {sourcedir}
+
+{sourcedir} can be /usr/src or anywhere the FreeBSD source is
+installed.
+
+make_baseimg.sh will rebuild world and kernel and create a FreeBSD
+ready-to-run image. It will then create a ZFS dataset under "zroot"
+named "zroot/tests/pf" and copy the image there for the tests to be
+able to find.
+
+
+Running tests
+-------------
+
+The tests use kyua and ATF, and the usual administration commands work
+here as well. To run the tests, first:
+
+ % cd /usr/tests/sys/netpfil/pf
+
+Then, to list all available tests:
+
+ % kyua list
+
+To run all the tests:
+
+ % kyua test
+
+To run a specific test:
+
+ % kyua test {desired_test}
+
+Note that only one test can be run at a time! That is because the
+tests use the same names for creating virtual machines, and running
+multiple tests in parallel will create a collision. Running all the
+tests as above will run them one by one, so that will not create any
+problem.
+
+
+Architecture of the tests
+-------------------------
+
+The tests use the test frameworks kyua and ATF, so every execution of
+a test gets its own empty temporary working directory, which is
+cleaned up afterwards by the test framework. The tests have also
+access to the "source directory", which is the installation directory
+where the kyua command is issued. This directory is used for reading
+configuration and running helper scripts and functions.
+
+Before running any of the tests, the virtual machine base image needs
+to be created. It is placed in the "zroot/tests/pf" ZFS dataset,
+making it easy for the tests to find. This image is cloned by the
+control script every time a test is run.
+
+The main test script is pf_test, which is run by the framework. This
+script cooperates with the VM control script vmctl.sh via utility
+functions in pf_test_util.sh. The main script takes care of network
+device allocation, address assignment, and VM naming, while the VM
+control script takes care of creating and configuring VM images and
+starting and stopping VMs. These two scripts communicate via command
+line arguments and local files created in the ATF working directory.
+
+All VMs need at least one interface to run SSH on, and preferably more
+interfaces for running tests. The VM control script vmctl.sh receives
+the list of interfaces from pf_test, configures them such that SSH is
+enabled on the first interface, and writes out login information in
+local files for pf_test to read back.
+
+The pf_test script needs to wait for the VMs to boot up and get ready.
+This takes between 60 and 120 seconds, and depends on various factors
+such as the number of network interfaces for the VM. To work around
+this, there is currently a hardcoded call to sleep(1) for each
+individual test.
+
+When the VMs are up and running, the main script pf_test uses the SSH
+connections to make further configuration before the tests start. It
+also takes care of wiring the VMs according to the test scenario,
+using local bridge interfaces.
+
+Typically, there will be two VMs, one running pf and one generating
+traffic. Each VM will have two interfaces, one for running SSH and
+one for connecting to the other VM via a network bridge running on the
+host. But more complicated scenarios than this can also be created.
+
+In the installation directory, under files/, various test scripts
+exist, written in Python and using scapy, which can be uploaded to the
+VMs by pf_test. The test scripts usually need configuration, which is
+also uploaded to the VMs by pf_test. The host itself, running pf_test
+and vmctl.sh, does not run any tests directly, that is it does not run
+pfctl, pf, or generate any traffic. This is only done by the VMs.
+
+Tearing down and cleaning up after testing is done by pf_test, which
+delegates VM destruction to vmctl.sh and cleans up everything else by
+itself.
+
+
+Future work
+-----------
+
+Below are some areas of improvement for the pf tests:
+
+* Ability to run multiple tests simultaneously: The main issue is
+ naming of the virtual machines, but other issues might also need
+ attention.
+
+* Start-up of virtual machines: Currently the tests wait for a
+ predefined amount of time, hardcoded as a call to sleep(1), until
+ the virtual machines are done booting. A way to directly check the
+ status of a virtual machine would be desirable. Perhaps by reading
+ from the virtual console?
+
+* Ease of creating new tests: Currently there is a lot of boilerplate
+ code when creating a test (in the body and cleanup function of the
+ test). A way to simplify this would be desirable. Perhaps create a
+ set of main test cases holding default configuration, that can be
+ branched off and modified?
Index: tests/sys/netpfil/pf/files/Makefile
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/Makefile
@@ -0,0 +1,13 @@
+# $FreeBSD$
+
+TESTSDIR= ${TESTSBASE}/sys/netpfil/pf/files
+BINDIR= ${TESTSDIR}
+
+FILES= pf_test_conf.sh pf_test_util.sh \
+ scrub_forward.py scrub6.py conf.py util.py \
+ make_baseimg.sh vmctl.sh
+
+FILESMODE_make_baseimg.sh= 0555
+FILESMODE_vmctl.sh= 0555
+
+.include <bsd.progs.mk>
Index: tests/sys/netpfil/pf/files/conf.py
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/conf.py
@@ -0,0 +1,13 @@
+# python2
+
+# Read conf variables from pf_test_conf.sh.
+
+conffile = open('pf_test_conf.sh')
+
+for line in conffile:
+ # Simple test that line is of the form var=val.
+ if len(line.split('=', 1)) == 2:
+ # This will also execute comment lines, but since comment
+ # syntax for Python is the same as for shell scripts, it isn't
+ # a problem.
+ exec(line)
Index: tests/sys/netpfil/pf/files/make_baseimg.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/make_baseimg.sh
@@ -0,0 +1,57 @@
+#! /bin/sh
+
+# make_baseimg.sh - create base image file for tests needing VMs.
+#
+# make_baseimg.sh creates a base image file from source. It needs to
+# be pointed to the source directory. It uses the source build system
+# to create an image and then installs needed packages in it.
+#
+# make_baseimg.sh should be run as root.
+
+# Change this to point to the source directory.
+sourcedir="${1}"
+
+[ -z "${sourcedir}" ] && {
+	echo "Usage: ${0} {sourcedir}" >&2
+	exit 1
+}
+
+ncpu="$(sysctl -n hw.ncpu)"
+baseimg="zroot/tests/pf/baseimg"
+# Canonical source path is needed before it is first used below.
+sourcedir_canon="$(readlink -f "${sourcedir}")" || exit 1
+
+cd "${sourcedir}" || exit 1
+make -j "${ncpu}" buildworld || exit 1
+make -j "${ncpu}" buildkernel || exit 1
+
+cd release || exit 1
+# TODO Instead of make clean, use an alternative target directory.
+make clean || exit 1
+rm -r "/usr/obj${sourcedir_canon}/release" # force rebuilding by make release
+make release || exit 1
+make vm-image \
+	WITH_VMIMAGES="1" VMBASE="vm-tests-pf" \
+	VMFORMATS="raw" VMSIZE="3G" || exit 1
+mountdir="/mnt/tests/pf/baseimg"
+
+cd "/usr/obj${sourcedir_canon}/release" || exit 1
+# Create the dataset first; only then can its mountpoint be queried.
+zfs create -p "${baseimg}" || exit 1
+zmountbase="$(zfs get -H -o value mountpoint "${baseimg}")" || exit 1
+cp -ai vm-tests-pf.raw "${zmountbase}/img" || exit 1
+
+mkdir -p "${mountdir}" || exit 1
+md="$(mdconfig ${zmountbase}/img)" || exit 1
+(
+	mount "/dev/${md}p3" "${mountdir}" || exit 1
+	# Install packages needed by the tests inside the image.
+	chroot "${mountdir}" \
+		env ASSUME_ALWAYS_YES="yes" \
+		pkg install "python2.7" "scapy"
+	status="$?"
+	umount "${mountdir}"
+	exit "${status}"
+)
+status="$?"
+mdconfig -du "${md}"
+exit "${status}"
Index: tests/sys/netpfil/pf/files/pf_test_conf.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/pf_test_conf.sh
@@ -0,0 +1,3 @@
+# pf_test_conf.sh - common configuration for tests.
+
+PYTHON2='python2.7'
Index: tests/sys/netpfil/pf/files/pf_test_util.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/pf_test_util.sh
@@ -0,0 +1,123 @@
+# pf_test_util.sh - utility functions.
+
+. "$(atf_get_srcdir)/files/pf_test_conf.sh"
+
+PF_TEST_DIR="$(atf_get_srcdir)"
+export PF_TEST_DIR
+
+PATH="${PF_TEST_DIR}/files:${PATH}"
+export PATH
+
+# pair_create () {
+# for i in "$@" ; do
+# ifpair="epair${i}"
+# addra="PAIR_${i}_ADDR_A"
+# addrb="PAIR_${i}_ADDR_B"
+# netmask="PAIR_${i}_NETMASK"
+# addr6a="PAIR_${i}_ADDR6_A"
+# addr6b="PAIR_${i}_ADDR6_B"
+# prefixlen="PAIR_${i}_PREFIXLEN"
+# ifconfig "${ifpair}" create
+# eval "ifconfig ${ifpair}a inet \$${addra} netmask \$${netmask}"
+# eval "ifconfig ${ifpair}a inet6 \$${addr6a} prefixlen \$${prefixlen}"
+# eval "ifconfig ${ifpair}b inet \$${addrb} netmask \$${netmask}"
+# eval "ifconfig ${ifpair}b inet6 \$${addr6b} prefixlen \$${prefixlen}"
+# done
+# }
+
+# pair_destroy () {
+# for i in "$@" ; do
+# ifpair="epair${i}"
+# ifconfig "${ifpair}a" destroy
+# done
+# }
+
+# scp_cmd () {
+# vm="${1}" &&
+# sshlogin="$(cat vmctl.${vm}.sshlogin)" &&
+# echo "scp -q -o StrictHostKeyChecking=no \
+# -i vmctl.${vm}.id_rsa ${sshlogin}"
+# }
+
+# ssh_cmd - print SSH command for connecting to virtual machine.
+ssh_cmd () {
+ vm="${1}" &&
+ sshlogin="$(cat vmctl.${vm}.sshlogin)" &&
+ echo "ssh -q -o StrictHostKeyChecking=no \
+-i vmctl.${vm}.id_rsa ${sshlogin}"
+}
+
+# ssh_login () {
+# vm="${1}"
+# cat "vmctl.${vm}.sshlogin"
+# }
+
+# tap_create - configure tap interface on host machine with matching
+# vtnet interface on virtual machine.
+tap_create () {
+ vm="${1}"
+ tap="${2}"
+ tap_inet="${3}"
+ vtnet="${4}"
+ vtnet_inet="${5}"
+ atf_check ifconfig "${tap}" create inet "${tap_inet}" link0
+ echo "ifconfig_${vtnet}=\"inet ${vtnet_inet}\"" >> "vmctl.${vm}.rcappend"
+}
+
+# bridge_create - create bridge interface for communication between
+# virtual machines.
+bridge_create () {
+ iface="${1}"
+ shift 1 || atf_fail "bridge_create"
+ atf_check ifconfig "${iface}" create
+ for i in "$@" ; do
+ atf_check ifconfig "${iface}" addm "${i}"
+ atf_check ifconfig "${iface}" stp "${i}"
+ done
+ atf_check ifconfig "${iface}" up
+}
+
+# vm_create - create and start a virtual machine.
+vm_create () {
+ vm="${1}"
+ shift 1 || atf_fail "vm_create"
+ # Rest of arguments is network (tap) interfaces.
+ #echo "==== BEGIN ${vm} ====" >&2
+ #cat "vmctl.${vm}.rcappend" >&2
+ #echo "==== END ${vm} ====" >&2
+ atf_check -e ignore \
+ vmctl.sh create "${vm}" "zroot/tests/pf" \
+ "/dev/nmdmtests-pf-${vm}B" "$@"
+ # If all went well, valid SSH configuration should have been
+ # created.
+ ssh_cmd_vm="$(ssh_cmd "${vm}")"
+ atf_check [ "x${ssh_cmd_vm}" '!=' "x" ]
+}
+
+# vm_destroy - stop and erase a virtual machine.
+vm_destroy () {
+ vm="${1}"
+ vmctl.sh destroy "${vm}" "zroot/tests/pf"
+}
+
+# vm_ether - get Ethernet address of interface of virtual machine.
+vm_ether () {
+ vm="${1}"
+ iface="${2}"
+ ssh_cmd_vm="$(ssh_cmd "${vm}")" || return 1
+ ether_pattern='[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]:[0-9a-f][0-9a-f]'
+ ${ssh_cmd_vm} ifconfig "${iface}" | \
+ grep -i 'ether' | grep -io "${ether_pattern}"
+}
+
+# upload_file - Upload file to virtual machine. Third argument is the
+# remote file name; it defaults to the local name when empty/omitted.
+upload_file () {
+	vm="${1}"
+	file="${2}"
+	filename="${3}"
+	[ -z "${filename}" ] && filename="${file}"
+	(
+		cat "$(atf_get_srcdir)/files/${file}" | \
+			$(ssh_cmd "${vm}") "cat > /root/${filename}"
+	) || atf_fail "Upload ${file} ${filename}"
+}
Index: tests/sys/netpfil/pf/files/scrub6.py
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/scrub6.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python2
+
+import scapy.all as sp
+
+import itertools as it
+import multiprocessing as mp
+import random, sys, time
+
+import conf, util
+
+raw_500 = ('abcdefghijklmnopqrstuvwxyz' * 22)[random.randrange(26):][:500]
+
+ether1 = sp.Ether(src=conf.LOCAL_MAC_1, dst=conf.REMOTE_MAC_1)
+ether2 = sp.Ether(src=conf.LOCAL_MAC_2, dst=conf.REMOTE_MAC_2)
+ip1 = sp.IPv6(src=conf.LOCAL_ADDR6_1, dst=conf.LOCAL_ADDR6_3)
+ip2 = sp.IPv6(src=conf.LOCAL_ADDR6_2, dst=conf.LOCAL_ADDR6_3)
+icmp = sp.ICMPv6EchoRequest(id=random.randrange(1 << 16),
+ seq=random.randrange(1 << 16), data=raw_500)
+
+p1 = ether1 / ip1 / icmp
+p2 = ether2 / ip2 / icmp
+tofrag1 = ether1 / ip1 / sp.IPv6ExtHdrFragment() / icmp
+tofrag2 = ether2 / ip2 / sp.IPv6ExtHdrFragment() / icmp
+
+def sendpackets():
+ time.sleep(1)
+ sp.sendp(sp.fragment6(tofrag1, 400), iface=conf.LOCAL_IF_1, verbose=False)
+ sp.sendp(sp.fragment6(tofrag2, 400), iface=conf.LOCAL_IF_2, verbose=False)
+
+if len(sys.argv) < 2:
+ exit('No command given')
+
+if sys.argv[1] == 'sendonly':
+ sendpackets()
+ exit()
+else:
+ exit('Bad command: %s' % repr(sys.argv[1]))
+
+# Following sniff-and-reassembly code kept for future usage.
+
+sender = mp.Process(target=sendpackets)
+sender.start()
+
+sniffed = sp.sniff(iface=conf.LOCAL_IF_3, timeout=10)
+
+sender.join()
+
+for i, p in it.izip(it.count(), sniffed):
+ show = []
+ while type(p) != sp.NoPayload:
+ if type(p) == sp.IPv6:
+ show.append(('IPv6', p.src, p.dst))
+ elif type(p) == sp.IPv6ExtHdrFragment:
+ show.append(('Fragment', p.id, p.offset, p.m))
+ elif type(p) == sp.ICMPv6EchoRequest:
+ show.append(('Echo-Request', p.data))
+ elif type(p) == sp.Raw:
+ show.append(('Raw', p.load))
+ p = p.payload
+ print 'Packet', i, ':', show
+
+success1, success2 = False, False
+
+defr = util.Defragmenter6()
+pp1, pp2 = p1.payload, p2.payload # IPv6 layer
+for p in sniffed:
+ pp_nfrag = defr.more(p)
+ if pp_nfrag is None:
+ continue
+ pp, nfrag = pp_nfrag
+
+ # At this point, pp is a packet that has been reassembled from
+ # sniffed packets. We can use nfrag to check how many sniffed
+ # packets it was reassembled from.
+
+ # Success for interface 1 if packet received in 1 fragment,
+ # i.e. scrub active on remote side.
+ success1 = success1 or (nfrag == 1 and
+ (pp.src, pp.dst) == (pp1.src, pp1.dst) and
+ str(pp.payload) == str(pp1.payload))
+
+ # Success for interface 2 if packet received in 2 fragments,
+ # i.e. no scrub on remote side.
+ success2 = success2 or (nfrag == 2 and
+ (pp.src, pp.dst) == (pp2.src, pp2.dst) and
+ str(pp.payload) == str(pp2.payload))
+
+if not (success1 and success2):
+ exit(1)
Index: tests/sys/netpfil/pf/files/scrub_forward.py
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/scrub_forward.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python2
+
+import multiprocessing as mp
+import scapy.all as sp
+import conf
+import time
+import random
+import itertools as it
+
+import util
+
+raw_500 = ('abcdefghijklmnopqrstuvwxyz' * 22)[random.randrange(26):][:500]
+
+ether1 = sp.Ether(src=conf.LOCAL_MAC_1, dst=conf.REMOTE_MAC_1)
+ether2 = sp.Ether(src=conf.LOCAL_MAC_2, dst=conf.REMOTE_MAC_2)
+ip1 = sp.IP(src=conf.LOCAL_ADDR_1,
+ dst=conf.LOCAL_ADDR_3, id=random.randrange(1 << 16))
+ip2 = sp.IP(src=conf.LOCAL_ADDR_2,
+ dst=conf.LOCAL_ADDR_3, id=random.randrange(1 << 16))
+icmp = sp.ICMP(type='echo-request',
+ id=random.randrange(1 << 16), seq=random.randrange(1 << 16))
+
+p1 = ether1 / ip1 / icmp / raw_500
+p2 = ether2 / ip2 / icmp / raw_500
+
+def sendpackets():
+ time.sleep(1)
+ sp.sendp(sp.fragment(p1, 300), iface=conf.LOCAL_IF_1, verbose=False)
+ sp.sendp(sp.fragment(p2, 300), iface=conf.LOCAL_IF_2, verbose=False)
+
+sender = mp.Process(target=sendpackets)
+sender.start()
+
+sniffed = sp.sniff(iface=conf.LOCAL_IF_3, timeout=5)
+
+sender.join()
+
+# for i, p in it.izip(it.count(), sniffed):
+# print '==== Packet', i, '===='
+# p.show()
+# print
+
+success1, success2 = False, False
+
+defr = util.Defragmenter()
+pp1, pp2 = p1.payload, p2.payload # IP layer
+k1, k2 = util.pkey(pp1), util.pkey(pp2)
+for p in sniffed:
+ pp = defr.more(p)
+ if pp is None:
+ continue
+ k = util.pkey(pp)
+
+ # Success for interface 1 if packet received in 1 fragment,
+ # i.e. scrub active on remote side.
+ if not success1:
+ # print 'success1 == False'
+ success1 = (k == k1 and defr.stats[k] == 1 and
+ str(pp.payload) == str(pp1.payload))
+ # print 'success1 ==', success1
+
+ # Success for interface 2 if packet received in 2 fragments,
+ # i.e. no scrub on remote side.
+ if not success2:
+ # print 'success2 == False'
+ success2 = (k == k2 and defr.stats[k] == 2 and
+ str(pp.payload) == str(pp2.payload))
+ # print 'success2 ==', success2
+
+# print 'success1 ==', success1
+# print 'success2 ==', success2
+
+if not (success1 and success2):
+ exit(1)
Index: tests/sys/netpfil/pf/files/scrub_pflog.py
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/scrub_pflog.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python2
+
+import multiprocessing as mp
+import scapy.layers.pflog
+import scapy.all as sp
+import conf
+import time
+import random
+import itertools as it
+
+import util
+
+raw_500 = ('abcdefghijklmnopqrstuvwxyz' * 22)[random.randrange(26):][:500]
+
+ether1 = sp.Ether(src=conf.PAIR_0_MAC_A, dst=conf.PAIR_0_MAC_B)
+ether2 = sp.Ether(src=conf.PAIR_1_MAC_A, dst=conf.PAIR_1_MAC_B)
+ip1 = sp.IP(src=conf.PAIR_0_ADDR_A,
+ dst=conf.PAIR_0_ADDR_B, id=random.randrange(1 << 16))
+ip2 = sp.IP(src=conf.PAIR_1_ADDR_A,
+ dst=conf.PAIR_1_ADDR_B, id=random.randrange(1 << 16))
+icmp = sp.ICMP(type='echo-request',
+ id=random.randrange(1 << 16), seq=random.randrange(1 << 16))
+
+p1 = ether1 / ip1 / icmp / raw_500
+p2 = ether2 / ip2 / icmp / raw_500
+
+def sendpackets():
+ time.sleep(1)
+ sp.sendp(sp.fragment(p1, 300), iface=conf.PAIR_0_IF_A, verbose=False)
+ sp.sendp(sp.fragment(p2, 300), iface=conf.PAIR_1_IF_A, verbose=False)
+
+sender = mp.Process(target=sendpackets)
+sender.start()
+
+sniffed = sp.sniff(iface=conf.PFLOG_IF, timeout=5)
+#sniffed = sp.sniff(iface=conf.PAIR_1_IF_B, timeout=5)
+
+sender.join()
+
+for i, p in it.izip(it.count(), sniffed):
+ if True: #sp.IP in p:
+ print '==== Packet', i, '===='
+ p.show()
+ print
+
+success1, success2 = False, False
+
+defr = util.Defragmenter()
+pp1, pp2 = p1.payload, p2.payload # IP layer
+k1, k2 = util.pkey(pp1), util.pkey(pp2)
+for p in sniffed:
+ pp = defr.more(p)
+ if pp is None:
+ continue
+ k = util.pkey(pp)
+
+ # Success for interface 1 if packet received in 1 fragment,
+ # i.e. scrub active on remote side.
+ success1 = success1 or (k == k1 and defr.stats[k] == 1 and
+ str(pp.payload) == str(pp1.payload))
+
+ # Success for interface 2 if packet received in 2 fragments,
+ # i.e. no scrub on remote side.
+ success2 = success2 or (k == k2 and defr.stats[k] == 2 and
+ str(pp.payload) == str(pp2.payload))
+
+if not (success1 and success2):
+ exit(1)
Index: tests/sys/netpfil/pf/files/util.py
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/util.py
@@ -0,0 +1,138 @@
+# python2
+
+import scapy.all as sp
+
+def pkey(packet):
+ '''Packet key.'''
+ return (packet.src, packet.dst, packet.proto, packet.id)
+
+class Defragmenter(object):
+ def __init__(self):
+ self.frags = dict()
+ self.stats = dict()
+ def more(self, packet):
+ '''Add fragmented packet, return whole packet if complete.'''
+
+ # Find IP layer.
+ p = packet
+ while type(p) != sp.NoPayload:
+ if type(p) == sp.IP:
+ break
+ p = p.payload
+ else:
+ return
+
+ # # Return directly if not fragmented.
+ # if not ((p.flags & 1) or p.frag): # & 1 for MF
+ # return p
+
+ # Add fragment to its packet group.
+ key, val = pkey(p), (p.frag, p)
+ if key in self.frags:
+ self.frags[key].append(val)
+ self.stats[key] += 1
+ else:
+ self.frags[key] = [val]
+ self.stats[key] = 1
+ frag = self.frags[key]
+ frag.sort()
+
+ # Now all fragments in the group are sorted,
+ # go through them and connect them.
+ i = 0
+ while i + 1 < len(frag):
+ f1, p1 = frag[i]
+ f2, p2 = frag[i + 1]
+ len1, len2 = len(p1.payload), len(p2.payload)
+ if len1 == (f2 - f1) * 8:
+ header1 = sp.IP(tos=p1.tos, flags=p1.flags, ttl=p1.ttl,
+ src=p1.src, dst=p1.dst,
+ proto=p1.proto, id=p1.id)
+ # Now copy MF flag from p2.
+ header1.flags = (header1.flags & ~1) | (p2.flags & 1)
+ # Step 1/2: important for correct length field.
+ p = header1 / (str(p1.payload) + str(p2.payload))
+ # Step 2/2: important to recreate all layers.
+ p = sp.IP(str(p))
+ frag[i:i + 2] = [(f1, p)]
+ else:
+ i += 1
+
+ # Return packet if complete.
+ p = frag[0][1]
+ isfirst, islast = (not p.frag), (not (p.flags & 1))
+ if len(frag) == 1 and isfirst and islast:
+ del self.frags[key]
+ return p
+
+def pkey6(packet):
+ '''Packet key.'''
+ id = packet[sp.IPv6ExtHdrFragment].id
+ return (packet.src, packet.dst, id)
+
+class Defragmenter6(object):
+ def __init__(self):
+ self.frags = dict()
+ self.stats = dict()
+ def more(self, packet):
+ '''Add fragmented packet, return whole packet if complete.
+
+ Returns None on no reassembly, or (p, n), where:
+ p is the defragmented packet ;
+ n is the number of original fragments.'''
+
+ # Find IPv6 layer.
+ p = packet
+ while type(p) != sp.NoPayload:
+ if type(p) == sp.IPv6:
+ break
+ p = p.payload
+ else:
+ return
+
+ # Return directly if not fragmented.
+ if type(p.payload) != sp.IPv6ExtHdrFragment:
+ return (p, 1)
+
+ # Add fragment to its packet group.
+ key, val = pkey6(p), (p.payload.offset, p)
+ if key in self.frags:
+ self.frags[key].append(val)
+ self.stats[key] += 1
+ else:
+ self.frags[key] = [val]
+ self.stats[key] = 1
+ frag = self.frags[key]
+ frag.sort()
+
+ # Now all fragments in the group are sorted,
+ # go through them and connect them.
+ i = 0
+ while i + 1 < len(frag):
+ f1, p1 = frag[i]
+ f2, p2 = frag[i + 1]
+ pfrag1, pfrag2 = p1.payload, p2.payload
+ len1, len2 = len(pfrag1.payload), len(pfrag2.payload)
+ if len1 == (f2 - f1) * 8:
+ header = sp.IPv6(tc=p1.tc, fl=p1.fl, hlim=p1.hlim,
+ src=p1.src, dst=p1.dst)
+ headerfrag = sp.IPv6ExtHdrFragment(nh=pfrag1.nh, offset=f1,
+ res1=pfrag1.res1,
+ res2=pfrag1.res2,
+ id=pfrag1.id, m=pfrag2.m)
+ p = (header / headerfrag /
+ (str(pfrag1.payload) + str(pfrag2.payload)))
+ frag[i:i + 2] = [(f1, p)]
+ else:
+ i += 1
+
+ # Return packet if complete.
+ p = frag[0][1]
+ pfrag = p.payload
+ isfirst, islast = (not pfrag.offset), (not pfrag.m)
+ if len(frag) == 1 and isfirst and islast:
+ del self.frags[key]
+ header = sp.IPv6(tc=p.tc, fl=p.fl, hlim=p.hlim, nh=pfrag.nh,
+ src=p.src, dst=p.dst)
+ payload = str(pfrag.payload)
+ return (header / payload, self.stats[key])
Index: tests/sys/netpfil/pf/files/vmctl.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/vmctl.sh
@@ -0,0 +1,152 @@
+#! /bin/sh
+
+# vmctl.sh - control a VM for tests.
+#
+# vmctl.sh runs all necessary zfs commands, only receiving the
+# directory name from the caller. All network configuration visible
+# to the VM is received through the vmctl.${vm}.rcappend file. The
+# first interface specified in the ${ifs} list is the one for which
+# SSH is setup.
+
+debug () {
+ echo "DEBUG: vmctl: (vm=$vm) $@" >&2
+}
+
+#debug "command line: $@"
+
+cmd="${1}"
+vm="${2}"
+zdir="${3}"
+console="${4}"
+shift 4
+ifs="$@"
+
+usage="\
+Usage: ${0} \"create\" {vm} {zdir} {console} {if1 if2 ...}
+ ${0} \"destroy\" {vm} {zdir}"
+
+baseimg="${zdir}/baseimg"
+snap="${zdir}/baseimg@${vm}"
+vmimg="${zdir}/vm.${vm}"
+mountdir="/mnt/tests/pf/vm.${vm}"
+
+# Make sure baseimg exists as a dataset.
+check_baseimg () {
+ # Return with success immediately if mountpoint (and, by
+ # extension, the dataset) exists and contains the image file.
+ zmountbase="$(zfs get -H -o value mountpoint ${baseimg})" &&
+ [ -e "${zmountbase}/img" ] && return
+ return 1
+ #zfs create -p "${baseimg}" || return 1
+ #zmountbase="$(zfs get -H -o value mountpoint ${baseimg})" || return 1
+ # Download image file.
+ # fetch -o "${imgfile}.xz" \
+ # "https://download.freebsd.org/ftp/releases/VM-IMAGES/11.0-RELEASE/amd64/Latest/FreeBSD-11.0-RELEASE-amd64.raw.xz" \
+ # || return 1
+ # TODO Use local copy of above for now.
+ # cp -ai "/var/tmp/FreeBSD-11.0-RELEASE-amd64.raw.xz" \
+ # "${zmountbase}/img.xz" || return 1
+ #cp -ai "/usr/obj/usr/home/paggas/paggas.freebsd/release/vm-cccc.raw" \
+ # "${zmountbase}/img" || return 1
+ # TODO Install scapy on image.
+}
+
+# Install system on VM.
+make_install () {
+ # TODO Copy pf binary files from host to VM. Quick fix while we
+ # use official images, will do proper system installs in the
+ # future.
+ cp -a "/boot/kernel/pf.ko" \
+ "${mountdir}/boot/kernel/pf.ko" || return 1
+ cp -a "/sbin/pfctl" \
+ "${mountdir}/sbin/pfctl" || return 1
+}
+
+write_sshlogin () {
+ addr="$(grep -E "ifconfig_.*inet.*[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+" \
+ "vmctl.${vm}.rcappend" |
+ sed -E "s/.*[^0-9]([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+).*/\1/" |
+ head -n 1)" || return 1
+ [ "x${addr}" '!=' "x" ] || return 1
+ echo "root@${addr}" > "vmctl.${vm}.sshlogin" || return 1
+}
+
+#debug 'begin'
+case "${cmd}" in
+    (create)
+	check_baseimg || exit 1
+	zfs snap "${snap}" || exit 1
+	zfs clone "${snap}" "${vmimg}" || exit 1
+	ssh-keygen -q -P '' -f "vmctl.${vm}.id_rsa" || exit 1
+	write_sshlogin || exit 1
+	mkdir -p "${mountdir}" || exit 1
+	zmountvm="$(zfs get -H -o value mountpoint ${vmimg})" || exit 1
+	md="$(mdconfig ${zmountvm}/img)" || exit 1
+	(
+	# "exit" (not "return") - we are in a subshell, not a function.
+	mount "/dev/${md}p3" "${mountdir}" || exit 1
+	(
+	make_install || exit 1
+	(
+	umask 077 || exit 1
+	mkdir -p "${mountdir}/root/.ssh" || exit 1
+	cat "vmctl.${vm}.id_rsa.pub" >> \
+	    "${mountdir}/root/.ssh/authorized_keys"
+	) || exit 1
+	(
+	echo "PermitRootLogin without-password" ;
+	echo "StrictModes no" ;
+	) >> "${mountdir}/etc/ssh/sshd_config" || exit 1
+	echo "sshd_enable=\"YES\"" >> \
+	    "${mountdir}/etc/rc.conf" || exit 1
+	cat "vmctl.${vm}.rcappend" >> \
+	    "${mountdir}/etc/rc.conf" || exit 1
+	# Test
+	# echo "ifconfig vtnet0 ether 02:00:00:00:00:01" >> \
+	#     "${mountdir}/etc/start_if.vtnet0" || exit 1
+	# echo "ifconfig vtnet1 ether 02:00:00:00:00:02" >> \
+	#     "${mountdir}/etc/start_if.vtnet1" || exit 1
+	#debug 'all append good'
+	)
+	appendstatus="$?"
+	#debug "appendstatus in: ${appendstatus}"
+	umount "${mountdir}"
+	exit "${appendstatus}"
+	)
+	appendstatus="$?"
+	mdconfig -du "${md}"
+	rmdir "${mountdir}"
+	#debug "appendstatus out: ${appendstatus}"
+	[ "x${appendstatus}" = 'x0' ] || exit 1
+	(
+	ifsopt=''
+	for i in ${ifs} ; do
+	    ifsopt="${ifsopt} -t ${i}" ; done
+	#debug "ifsopt: ${ifsopt}"
+	daemon -p "vmctl.${vm}.pid" \
+	    sh /usr/share/examples/bhyve/vmrun.sh ${ifsopt} \
+	    -d "${zmountvm}/img" -C "${console}" \
+	    "tests-pf-${vm}"
+	sleep 5 # TODO debug only
+	#ls -la '/dev/vmm' >&2
+	)
+	;;
+    (destroy)
+	bhyvectl --destroy --vm="tests-pf-${vm}" >&2
+	[ -e "vmctl.${vm}.pid" ] && kill "$(cat vmctl.${vm}.pid)"
+	rm "vmctl.${vm}.id_rsa" \
+	    "vmctl.${vm}.id_rsa.pub" \
+	    "vmctl.${vm}.sshlogin"
+	# TODO Sleep a bit before destroying dataset, so that it
+	# doesn't show up as "busy".
+	sleep 5
+	zfs destroy -R "${snap}"
+	;;
+    (*)
+	echo "${usage}" >&2
+	exit 1
+	;;
+esac
+
+status="$?"
+#debug "status: ${status}"
+exit "${status}"
Index: tests/sys/netpfil/pf/files/vmctl.sh.zvol
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/files/vmctl.sh.zvol
@@ -0,0 +1,127 @@
+#! /bin/sh
+
+# vmctl.sh - control a VM for tests.
+#
+# vmctl.sh runs all necessary zfs commands, only receiving the
+# directory name from the caller. All network configuration visible
+# to the VM is received through the vmctl.${vm}.rcappend file. The
+# first interface specified in the ${ifs} list is the one for which
+# SSH is set up.
+
+cmd="${1}" # "create" or "destroy"
+vm="${2}" # VM name; used in file and dataset names
+zdir="${3}" # ZFS dataset holding the images
+console="${4}" # console device handed to vmrun.sh -C
+shift 4
+ifs="$@" # remaining args: tap interfaces for the VM
+
+usage="\
+Usage: ${0} \"create\" {vm} {zdir} {console} {if1 if2 ...}
+ ${0} \"destroy\" {vm} {zdir}"
+
+baseimg="${zdir}/baseimg" # shared base zvol, cloned per VM
+snap="${zdir}/baseimg@${vm}" # per-VM snapshot of the base image
+vmimg="${zdir}/vm.${vm}" # per-VM clone the VM boots from
+mountdir="/mnt/tests/pf/vm.${vm}" # where the VM image is mounted for setup
+
+# Make sure baseimg exists as a zvol; no-op if it is already there.
+make_baseimg () {
+ [ -e "/dev/zvol/${baseimg}" ] && return
+ tempdir="$(mktemp -d)" # scratch dir for the downloaded image
+ (
+ # Download image file.
+ imgfile="${tempdir}/FreeBSD-11.0-RELEASE-amd64.raw"
+ # fetch -o "${imgfile}.xz" \
+ # "https://download.freebsd.org/ftp/releases/VM-IMAGES/11.0-RELEASE/amd64/Latest/FreeBSD-11.0-RELEASE-amd64.raw.xz" \
+ # || return 1
+ # TODO Use local copy of above for now.
+ cp -ai "/var/tmp/FreeBSD-11.0-RELEASE-amd64.raw.xz" \
+ "${imgfile}.xz" || exit 1 # exit, not return: inside a subshell
+ unxz "${imgfile}.xz" || exit 1
+ size="$(stat -f '%z' "${imgfile}")" # quoted: path must stay one word
+ # Round up to multiple of 16M.
+ [ "$(expr ${size} % 16777216)" = 0 ] ||
+ size="$(expr \( \( $size / 16777216 \) + 1 \) \* 16777216)"
+ # Copy image file to zvol.
+ zfs create -p -V "${size}" "${baseimg}" || exit 1
+ dd bs=16M if="${imgfile}" of="/dev/zvol/${baseimg}" || exit 1
+ )
+ status="$?"
+ rm -r "${tempdir}" # always clean up the scratch dir
+ return "${status}"
+}
+
+# Install system on VM: overlay the host's pf bits onto the mounted image.
+make_install () {
+ # TODO Copy pf binary files from host to VM. Quick fix while we
+ # use official images, will do proper system installs in the
+ # future.
+ cp -a "/boot/kernel/pf.ko" \
+ "${mountdir}/boot/kernel/pf.ko" || return 1 # assumes host/VM are binary-compatible -- TODO confirm
+ cp -a "/sbin/pfctl" \
+ "${mountdir}/sbin/pfctl" || return 1
+}
+
+write_sshlogin () { # derive "root@<first-IPv4>" from the rcappend file
+ addr="$(grep -E "ifconfig_.*inet.*[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+" \
+ "vmctl.${vm}.rcappend" |
+ sed -E "s/.*[^0-9]([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+).*/\1/" |
+ head -n 1)" || return 1
+ [ "x${addr}" '!=' "x" ] || return 1 # fail if no address was found
+ echo "root@${addr}" > "vmctl.${vm}.sshlogin" || return 1 # ssh target used by the tests
+}
+
+case "${cmd}" in
+ (create) # build and boot a fresh VM clone
+ make_baseimg || exit 1
+ zfs snap "${snap}" || exit 1
+ zfs clone "${snap}" "${vmimg}" || exit 1
+ ssh-keygen -q -P '' -f "vmctl.${vm}.id_rsa" || exit 1
+ write_sshlogin || exit 1
+ mkdir -p "${mountdir}" || exit 1
+ mount "/dev/zvol/${vmimg}p3" "${mountdir}" || exit 1
+ (
+ make_install || exit 1 # exit, not return: subshell, not a function
+ (
+ umask 077 || exit 1 # 077 (not 0177): .ssh dir must keep its execute bit
+ mkdir -p "${mountdir}/root/.ssh" || exit 1
+ cat "vmctl.${vm}.id_rsa.pub" >> \
+ "${mountdir}/root/.ssh/authorized_keys"
+ ) || exit 1 # install the PUBLIC key, never the private half
+ echo "PermitRootLogin without-password" >> \
+ "${mountdir}/etc/ssh/sshd_config" || exit 1
+ echo "sshd_enable=\"YES\"" >> \
+ "${mountdir}/etc/rc.conf" || exit 1
+ cat "vmctl.${vm}.rcappend" >> \
+ "${mountdir}/etc/rc.conf" || exit 1
+ )
+ appendstatus="$?"
+ umount "${mountdir}"
+ rmdir "${mountdir}"
+ [ "x${appendstatus}" = 'x0' ] || exit 1 # top level: return is invalid outside a function
+ (
+ ifsopt=''
+ for i in ${ifs} ; do
+ ifsopt="${ifsopt} -t ${i}" ; done
+ daemon -p "vmctl.${vm}.pid" \
+ sh /usr/share/examples/bhyve/vmrun.sh ${ifsopt} \
+ -d "/dev/zvol/${vmimg}" -C "${console}" \
+ "tests-pf-${vm}" # VM name passed to bhyve
+ )
+ ;;
+ (destroy)
+ bhyvectl --destroy --vm="tests-pf-${vm}"
+ [ -e "vmctl.${vm}.pid" ] && kill "$(cat vmctl.${vm}.pid)"
+ rm "vmctl.${vm}.id_rsa" \
+ "vmctl.${vm}.id_rsa.pub" \
+ "vmctl.${vm}.sshlogin"
+ # TODO Sleep a bit before destroying dataset, so that it
+ # doesn't show up as "busy".
+ sleep 5
+ zfs destroy -R "${snap}"
+ ;;
+ (*)
+ echo "${usage}" >&2
+ exit 1
+ ;;
+esac
Index: tests/sys/netpfil/pf/pf_test.sh
===================================================================
--- /dev/null
+++ tests/sys/netpfil/pf/pf_test.sh
@@ -0,0 +1,308 @@
+# Make will add a shebang line at the top of this file.
+
+# These tests connect to a remote test machine, load a rules file,
+# possibly start some services, and run some tests. The tests cleanup
+# the test machine in the end.
+#
+# SSH root access to the test machine is required for the tests to
+# work.
+
+. "$(atf_get_srcdir)/files/pf_test_util.sh"
+
+# Starts two instances of nc on the remote machine, listening on two
+# different ports, of which one port is blocked-with-return by the
+# remote pf. The test tries then to connect to the two instances from
+# the local machine. The test succeeds if one connection succeeds but
+# the other one fails.
+atf_test_case remote_block_return cleanup
+remote_block_return_head () {
+ atf_set descr 'Block-with-return a port and test that it is blocked.'
+}
+remote_block_return_body () {
+ block_port="50000"
+ pass_port="50001"
+ rules="block return in on vtnet1 proto tcp to port ${block_port}"
+ # Set up networking.
+ tap_create client tap19302 10.135.213.1/28 vtnet0 10.135.213.2/28 # helper presumably from pf_test_util.sh (sourced above)
+ tap_create client tap19303 10.135.213.33/28 vtnet1 10.135.213.35/28
+ tap_create server tap19304 10.135.213.17/28 vtnet0 10.135.213.18/28
+ tap_create server tap19305 10.135.213.34/28 vtnet1 10.135.213.36/28
+ bridge_create bridge6555 tap19303 tap19305 # bridge the vtnet1 legs together
+ # Start VMs.
+ vm_create client tap19302 tap19303
+ vm_create server tap19304 tap19305
+ # Debug
+ #atf_check sleep 900
+ # Wait for VMs to start up and for their SSH daemons to start
+ # listening.
+ atf_check sleep 60 # TODO: poll for sshd instead of a fixed delay
+ # Start pf.
+ atf_check $(ssh_cmd server) "kldload -n pf" # -n: no error if already loaded
+ echo "${rules}" | atf_check -e ignore $(ssh_cmd server) "pfctl -ef -" # -e enable, -f - rules from stdin
+ # Start test.
+ atf_check daemon -p nc.block.pid $(ssh_cmd server) "nc -l ${block_port}"
+ atf_check daemon -p nc.pass.pid $(ssh_cmd server) "nc -l ${pass_port}"
+ remote_addr_1="10.135.213.36"
+ atf_check -s exit:1 -e empty $(ssh_cmd client) \
+ "nc -z ${remote_addr_1} ${block_port}" # blocked port: return sends RST, nc exits 1
+ atf_check -s exit:0 -e ignore $(ssh_cmd client) \
+ "nc -z ${remote_addr_1} ${pass_port}" # unblocked port must accept
+}
+remote_block_return_cleanup () {
+ # Stop test.
+ [ -e nc.block.pid ] && kill "$(cat nc.block.pid)"
+ [ -e nc.pass.pid ] && kill "$(cat nc.pass.pid)"
+ # # Stop pf.
+ # $(ssh_cmd server) "pfctl -dFa ;
+ # kldunload -n pf ;
+ # true"
+ # Stop VMs.
+ vm_destroy client
+ vm_destroy server
+ # Tear down networking.
+ ifconfig bridge6555 destroy
+ ifconfig tap19302 destroy
+ ifconfig tap19303 destroy
+ ifconfig tap19304 destroy
+ ifconfig tap19305 destroy
+}
+
+atf_test_case remote_block_drop cleanup
+remote_block_drop_head () {
+ atf_set descr 'Block-with-drop a port and test that it is blocked.'
+}
+remote_block_drop_body () {
+ block_port="50000"
+ pass_port="50001"
+ rules="block drop in on vtnet1 proto tcp to port ${block_port}"
+ # Set up networking.
+ tap_create client tap19302 10.135.213.1/28 vtnet0 10.135.213.2/28
+ tap_create client tap19303 10.135.213.33/28 vtnet1 10.135.213.35/28
+ tap_create server tap19304 10.135.213.17/28 vtnet0 10.135.213.18/28
+ tap_create server tap19305 10.135.213.34/28 vtnet1 10.135.213.36/28
+ bridge_create bridge6555 tap19303 tap19305
+ # Start VMs.
+ vm_create client tap19302 tap19303
+ vm_create server tap19304 tap19305
+ # Debug
+ #atf_check sleep 900
+ # Wait for VMs to start up and for their SSH daemons to start
+ # listening.
+ atf_check sleep 60 # TODO: poll for sshd instead of a fixed delay
+ # Start pf.
+ atf_check $(ssh_cmd server) "kldload -n pf" # -n: no error if already loaded
+ echo "${rules}" | atf_check -e ignore $(ssh_cmd server) "pfctl -ef -"
+ # Start test.
+ atf_check daemon -p nc.block.pid $(ssh_cmd server) "nc -l ${block_port}"
+ atf_check daemon -p nc.pass.pid $(ssh_cmd server) "nc -l ${pass_port}"
+ remote_addr_1="10.135.213.36"
+ atf_check -s exit:1 -e empty $(ssh_cmd client) \
+ "nc -z -w 4 ${remote_addr_1} ${block_port}" # -w 4: drop sends no RST, so rely on a timeout
+ atf_check -s exit:0 -e ignore $(ssh_cmd client) \
+ "nc -z ${remote_addr_1} ${pass_port}"
+}
+remote_block_drop_cleanup () {
+ # Stop test.
+ [ -e nc.block.pid ] && kill "$(cat nc.block.pid)"
+ [ -e nc.pass.pid ] && kill "$(cat nc.pass.pid)"
+ # # Stop pf.
+ # $(ssh_cmd server) "pfctl -dFa ;
+ # kldunload -n pf ;
+ # true"
+ # Stop VMs.
+ vm_destroy client
+ vm_destroy server
+ # Tear down networking.
+ ifconfig bridge6555 destroy
+ ifconfig tap19302 destroy
+ ifconfig tap19303 destroy
+ ifconfig tap19304 destroy
+ ifconfig tap19305 destroy
+}
+
+# This test uses 2 interfaces to connect to the test machine,
+# $REMOTE_IF_1 and $REMOTE_IF_2. The test machine is doing reassembly
+# on one of the two interfaces. We send one echo request on each
+# interface of size 3000, which will be fragmented before being sent.
+# We capture the traffic on the test machine's pflog and transfer the
+# capture file to the host machine for processing. The capture file
+# should show a reassembled echo request packet on one interface and
+# the original fragmented set of packets on the other.
+atf_test_case remote_scrub_todo cleanup
+remote_scrub_todo_head () {
+ atf_set descr 'Scrub on one of two interfaces and test difference.'
+}
+remote_scrub_todo_body () {
+ # files to be used in local directory: tempdir.var tcpdump.pid
+ # files to be used in remote temporary directory: pflog.pcap
+ rules="scrub in on $REMOTE_IF_1 all fragment reassemble
+ pass log (all, to pflog0) on { $REMOTE_IF_1 $REMOTE_IF_2 }"
+ atf_check ssh "$SSH_0" 'kldload -n pf pflog' # $SSH_0/$REMOTE_IF_* presumably set by environment or util -- TODO confirm
+ echo "$rules" | atf_check -e ignore ssh "$SSH_0" 'pfctl -ef -'
+ atf_check -o save:tempdir.var ssh "$SSH_0" 'mktemp -dt pf_test.tmp' # remote scratch dir; path saved in local tempdir.var
+ #atf_check_equal 0 "$?"
+ tempdir="$(cat tempdir.var)"
+ timeout=5
+ atf_check daemon -p tcpdump.pid ssh "$SSH_0" \
+ "timeout $timeout tcpdump -U -i pflog0 -w $tempdir/pflog.pcap" # capture pflog remotely for $timeout seconds
+ (cd "$(atf_get_srcdir)/files" &&
+ atf_check python2 scrub6.py sendonly)
+ # Wait for tcpdump to pick up everything.
+ atf_check sleep "$(expr "$timeout" + 2)" # allow tcpdump to flush and exit
+ # Not sure if following will work with atf_check
+ atf_check scp "$SSH_0:$tempdir/pflog.pcap" ./
+ # TODO following will be removed when the test is complete, but
+ # since processing isn't implemented yet, we just save the file
+ # for now.
+ atf_check cp pflog.pcap "$(atf_get_srcdir)/"
+ # TODO process pflog.pcap for verification
+}
+remote_scrub_todo_cleanup () {
+ kill "$(cat tcpdump.pid)" # tcpdump may already have exited via timeout
+ tempdir="$(cat tempdir.var)"
+ ssh "$SSH_0" "rm -r \"$tempdir\" ; pfctl -dFa"
+}
+
+atf_test_case remote_scrub_forward cleanup
+remote_scrub_forward_head () {
+ atf_set descr 'Scrub defrag with forward on one \
+of two interfaces and test difference.'
+}
+remote_scrub_forward_body () { # pf logopts are comma-separated -- cf. pf.conf(5) and the rules at remote_scrub_todo
+ rules="scrub in on vtnet1 all fragment reassemble
+ pass log (all, to pflog0) on { vtnet1 vtnet2 }"
+ # Set up networking.
+ tap_create client tap19302 10.135.213.1/28 vtnet0 10.135.213.2/28
+ tap_create server tap19303 10.135.213.17/28 vtnet0 10.135.213.18/28
+ tap_create client tap19304 10.135.213.33/28 vtnet1 10.135.213.34/28
+ tap_create server tap19305 10.135.213.35/28 vtnet1 10.135.213.36/28
+ tap_create client tap19306 10.135.213.49/28 vtnet2 10.135.213.50/28
+ tap_create server tap19307 10.135.213.51/28 vtnet2 10.135.213.52/28
+ tap_create client tap19308 10.135.213.65/28 vtnet3 10.135.213.66/28
+ tap_create server tap19309 10.135.213.67/28 vtnet3 10.135.213.68/28
+ bridge_create bridge6555 tap19304 tap19305
+ bridge_create bridge6556 tap19306 tap19307
+ bridge_create bridge6557 tap19308 tap19309
+ # Start VMs.
+ vm_create client tap19302 tap19304 tap19306 tap19308
+ vm_create server tap19303 tap19305 tap19307 tap19309
+ # Wait for VMs to start up and for their SSH daemons to start
+ # listening.
+ atf_check sleep 120
+ # Debug
+ #atf_check sleep 900
+ # Start pf.
+ atf_check $(ssh_cmd server) "kldload -n pf"
+ echo "${rules}" | atf_check -e ignore $(ssh_cmd server) "pfctl -ef -"
+ # Enable forwarding.
+ atf_check -o ignore $(ssh_cmd server) "sysctl net.inet.ip.forwarding=1"
+ # Warm up connections, so that network discovery is complete.
+ atf_check -o ignore $(ssh_cmd server) "ping -c3 10.135.213.36" # NOTE(review): pings the server's own addresses -- should this run on the client?
+ atf_check -o ignore $(ssh_cmd server) "ping -c3 10.135.213.52"
+ atf_check -o ignore $(ssh_cmd server) "ping -c3 10.135.213.68"
+ # Upload test to VM.
+ upload_file client "scrub_forward.py" "test.py"
+ upload_file client "util.py"
+ (
+ client_ether1="$(vm_ether client vtnet1)" || exit 1 # exit, not return: inside a subshell
+ client_ether2="$(vm_ether client vtnet2)" || exit 1
+ server_ether1="$(vm_ether server vtnet1)" || exit 1
+ server_ether2="$(vm_ether server vtnet2)" || exit 1
+ echo "\
+LOCAL_MAC_1='${client_ether1}'
+LOCAL_MAC_2='${client_ether2}'
+REMOTE_MAC_1='${server_ether1}'
+REMOTE_MAC_2='${server_ether2}'
+LOCAL_ADDR_1='10.135.213.34'
+LOCAL_ADDR_2='10.135.213.50'
+LOCAL_ADDR_3='10.135.213.66'
+REMOTE_ADDR_1='10.135.213.36'
+REMOTE_ADDR_2='10.135.213.52'
+REMOTE_ADDR_3='10.135.213.68'
+LOCAL_IF_1='vtnet1'
+LOCAL_IF_2='vtnet2'
+LOCAL_IF_3='vtnet3'\
+" | $(ssh_cmd client) "cat >> /root/conf.py"
+ ) || atf_fail "Upload conf.py"
+ # Run test.
+ atf_check -o ignore $(ssh_cmd client) "cd /root && ${PYTHON2} test.py"
+}
+remote_scrub_forward_cleanup () {
+ # Stop VMs.
+ vm_destroy client
+ vm_destroy server
+ # Tear down networking.
+ ifconfig bridge6555 destroy
+ ifconfig bridge6556 destroy
+ ifconfig bridge6557 destroy
+ ifconfig tap19302 destroy
+ ifconfig tap19303 destroy
+ ifconfig tap19304 destroy
+ ifconfig tap19305 destroy
+ ifconfig tap19306 destroy
+ ifconfig tap19307 destroy
+ ifconfig tap19308 destroy
+ ifconfig tap19309 destroy
+}
+
+atf_test_case remote_scrub_forward6 cleanup
+remote_scrub_forward6_head () {
+ atf_set descr 'Scrub defrag with forward on one \
+of two interfaces and test difference, IPv6 version.'
+}
+remote_scrub_forward6_body () { # relies on $SSH_0/$REMOTE_* from environment or util -- TODO confirm
+ rules="scrub in on $REMOTE_IF_1 all fragment reassemble
+ pass log (all, to pflog0) on { $REMOTE_IF_1 $REMOTE_IF_2 }"
+ cd "$(atf_get_srcdir)" # run relative to the test source dir
+ # Enable pf.
+ atf_check ssh "$SSH_0" kldload -n pf # -n: no error if already loaded
+ echo "$rules" | atf_check -e ignore ssh "$SSH_0" pfctl -ef -
+ # Enable forwarding.
+ atf_check -o ignore ssh "$SSH_0" sysctl net.inet6.ip6.forwarding=1
+ # Warm up connections, so that network discovery is complete.
+ atf_check -o ignore ping6 -c3 "$REMOTE_ADDR6_1"
+ atf_check -o ignore ping6 -c3 "$REMOTE_ADDR6_2"
+ atf_check -o ignore ping6 -c3 "$REMOTE_ADDR6_3"
+ # Run test.
+ cd files &&
+ atf_check python2 scrub_forward6.py &&
+ cd ..
+}
+remote_scrub_forward6_cleanup () {
+ ssh "$SSH_0" "pfctl -dFa ;
+ sysctl net.inet6.ip6.forwarding=0"
+}
+
+atf_test_case scrub_pflog cleanup
+scrub_pflog_head () {
+ atf_set descr 'Scrub defrag with pflog on one \
+of two interfaces and test difference.'
+}
+scrub_pflog_body () { # pf logopts are comma-separated -- cf. pf.conf(5) and the rules at remote_scrub_todo
+ pair_create 0 1
+ rules="scrub in on ${PAIR_0_IF_A} all fragment reassemble
+ pass log (all, to ${PFLOG_IF}) on { ${PAIR_0_IF_A} ${PAIR_1_IF_A} }"
+ cd "$(atf_get_srcdir)" # run relative to the test source dir
+ # Enable pf.
+ atf_check kldload -n pf pflog # -n: no error if already loaded
+ atf_check ifconfig pflog0 up
+ echo "$rules" | atf_check -e ignore pfctl -ef -
+ # Run test.
+ cd files
+ atf_check python2 scrub_pflog.py
+}
+scrub_pflog_cleanup () {
+ pfctl -dFa
+ ifconfig pflog0 down
+ kldunload -n pf pflog
+ pair_destroy 0 1
+}
+
+atf_init_test_cases () { # register every test case with the ATF runner
+ atf_add_test_case remote_block_return
+ atf_add_test_case remote_block_drop
+ atf_add_test_case remote_scrub_todo
+ atf_add_test_case remote_scrub_forward
+ atf_add_test_case remote_scrub_forward6
+ atf_add_test_case scrub_pflog
+}
File Metadata
Details
Attached
Mime Type
text/plain
Expires
Wed, Oct 22, 4:51 AM (9 s ago)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
24041300
Default Alt Text
D11401.id32464.diff (52 KB)
Attached To
Mode
D11401: Kernel pf tests
Attached
Detach File
Event Timeline
Log In to Comment