Index: head/sys/conf/files
===================================================================
--- head/sys/conf/files	(revision 174981)
+++ head/sys/conf/files	(revision 174982)
@@ -1,2249 +1,2250 @@
# $FreeBSD$
#
# The long compile-with and dependency lines are required because of
# limitations in config: backslash-newline doesn't work in strings, and
# dependency lines other than the first are silently ignored.
#
acpi_quirks.h			optional acpi				\
	dependency	"$S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \
	compile-with	"${AWK} -f $S/tools/acpi_quirks2h.awk $S/dev/acpica/acpi_quirks" \
	no-obj no-implicit-rule before-depend				\
	clean		"acpi_quirks.h"
aicasm				optional ahc | ahd			\
	dependency	"$S/dev/aic7xxx/aicasm/*.[chyl]"		\
	compile-with	"CC='${CC}' ${MAKE} -f $S/dev/aic7xxx/aicasm/Makefile MAKESRCPATH=$S/dev/aic7xxx/aicasm" \
	no-obj no-implicit-rule						\
	clean		"aicasm* y.tab.h"
aic7xxx_seq.h			optional ahc				\
	compile-with	"./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic7xxx_seq.h -r aic7xxx_reg.h -p aic7xxx_reg_print.c -i $S/dev/aic7xxx/aic7xxx_osm.h $S/dev/aic7xxx/aic7xxx.seq" \
	no-obj no-implicit-rule before-depend local			\
	clean		"aic7xxx_seq.h"					\
	dependency	"$S/dev/aic7xxx/aic7xxx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm"
aic7xxx_reg.h			optional ahc				\
	compile-with	"./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic7xxx_seq.h -r aic7xxx_reg.h -p aic7xxx_reg_print.c -i $S/dev/aic7xxx/aic7xxx_osm.h $S/dev/aic7xxx/aic7xxx.seq" \
	no-obj no-implicit-rule before-depend local			\
	clean		"aic7xxx_reg.h"					\
	dependency	"$S/dev/aic7xxx/aic7xxx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm"
aic7xxx_reg_print.c		optional ahc				\
	compile-with	"./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic7xxx_seq.h -r aic7xxx_reg.h -p aic7xxx_reg_print.c -i $S/dev/aic7xxx/aic7xxx_osm.h $S/dev/aic7xxx/aic7xxx.seq" \
	no-obj no-implicit-rule local					\
	clean		"aic7xxx_reg_print.c"				\
	dependency	"$S/dev/aic7xxx/aic7xxx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm"
aic7xxx_reg_print.o		optional ahc ahc_reg_pretty_print	\
	compile-with	"${NORMAL_C}"					\
	no-implicit-rule local
aic79xx_seq.h			optional ahd pci			\
	compile-with	"./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic79xx_seq.h -r aic79xx_reg.h -p aic79xx_reg_print.c -i $S/dev/aic7xxx/aic79xx_osm.h $S/dev/aic7xxx/aic79xx.seq" \
	no-obj no-implicit-rule before-depend local			\
	clean		"aic79xx_seq.h"					\
	dependency	"$S/dev/aic7xxx/aic79xx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm"
aic79xx_reg.h			optional ahd pci			\
	compile-with	"./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic79xx_seq.h -r aic79xx_reg.h -p aic79xx_reg_print.c -i $S/dev/aic7xxx/aic79xx_osm.h $S/dev/aic7xxx/aic79xx.seq" \
	no-obj no-implicit-rule before-depend local			\
	clean		"aic79xx_reg.h"					\
	dependency	"$S/dev/aic7xxx/aic79xx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm"
aic79xx_reg_print.c		optional ahd pci			\
	compile-with	"./aicasm ${INCLUDES} -I$S/cam/scsi -I$S/dev/aic7xxx -o aic79xx_seq.h -r aic79xx_reg.h -p aic79xx_reg_print.c -i $S/dev/aic7xxx/aic79xx_osm.h $S/dev/aic7xxx/aic79xx.seq" \
	no-obj no-implicit-rule local					\
	clean		"aic79xx_reg_print.c"				\
	dependency	"$S/dev/aic7xxx/aic79xx.{reg,seq} $S/cam/scsi/scsi_message.h aicasm"
aic79xx_reg_print.o		optional ahd pci ahd_reg_pretty_print	\
	compile-with	"${NORMAL_C}"					\
	no-implicit-rule local
emu10k1-alsa%diked.h		optional snd_emu10k1 | snd_emu10kx	\
	dependency	"$S/tools/emu10k1-mkalsa.sh $S/gnu/dev/sound/pci/emu10k1-alsa.h" \
	compile-with	"CC='${CC}' AWK=${AWK} sh $S/tools/emu10k1-mkalsa.sh $S/gnu/dev/sound/pci/emu10k1-alsa.h emu10k1-alsa%diked.h" \
	no-obj no-implicit-rule before-depend				\
	clean		"emu10k1-alsa%diked.h"
p16v-alsa%diked.h		optional snd_emu10kx pci		\
	dependency	"$S/tools/emu10k1-mkalsa.sh $S/gnu/dev/sound/pci/p16v-alsa.h" \
	compile-with	"CC='${CC}' AWK=${AWK} sh $S/tools/emu10k1-mkalsa.sh $S/gnu/dev/sound/pci/p16v-alsa.h p16v-alsa%diked.h" \
	no-obj no-implicit-rule before-depend				\
	clean		"p16v-alsa%diked.h"
p17v-alsa%diked.h		optional snd_emu10kx pci		\
	dependency	"$S/tools/emu10k1-mkalsa.sh $S/gnu/dev/sound/pci/p17v-alsa.h" \
	compile-with	"CC='${CC}' AWK=${AWK} sh $S/tools/emu10k1-mkalsa.sh $S/gnu/dev/sound/pci/p17v-alsa.h p17v-alsa%diked.h" \
	no-obj no-implicit-rule before-depend				\
	clean		"p17v-alsa%diked.h"
miidevs.h			optional miibus | mii			\
	dependency	"$S/tools/miidevs2h.awk $S/dev/mii/miidevs"	\
	compile-with	"${AWK} -f $S/tools/miidevs2h.awk $S/dev/mii/miidevs" \
	no-obj no-implicit-rule before-depend				\
	clean		"miidevs.h"
pccarddevs.h			standard				\
	dependency	"$S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \
	compile-with	"${AWK} -f $S/tools/pccarddevs2h.awk $S/dev/pccard/pccarddevs" \
	no-obj no-implicit-rule before-depend				\
	clean		"pccarddevs.h"
usbdevs.h			optional usb				\
	dependency	"$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs"	\
	compile-with	"${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -h" \
	no-obj no-implicit-rule before-depend				\
	clean		"usbdevs.h"
usbdevs_data.h			optional usb				\
	dependency	"$S/tools/usbdevs2h.awk $S/dev/usb/usbdevs"	\
	compile-with	"${AWK} -f $S/tools/usbdevs2h.awk $S/dev/usb/usbdevs -d" \
	no-obj no-implicit-rule before-depend				\
	clean		"usbdevs_data.h"
cam/cam.c			optional scbus
cam/cam_periph.c		optional scbus
cam/cam_queue.c			optional scbus
cam/cam_sim.c			optional scbus
cam/cam_xpt.c			optional scbus
cam/scsi/scsi_all.c		optional scbus
cam/scsi/scsi_cd.c		optional cd
cam/scsi/scsi_ch.c		optional ch
cam/scsi/scsi_da.c		optional da
cam/scsi/scsi_low.c		optional ct | ncv | nsp | stg
cam/scsi/scsi_low_pisa.c	optional ct | ncv | nsp | stg
cam/scsi/scsi_pass.c		optional pass
cam/scsi/scsi_pt.c		optional pt
cam/scsi/scsi_sa.c		optional sa
cam/scsi/scsi_ses.c		optional ses
cam/scsi/scsi_sg.c		optional sg
cam/scsi/scsi_targ_bh.c		optional targbh
cam/scsi/scsi_target.c		optional targ
contrib/altq/altq/altq_cbq.c	optional altq				\
	compile-with	"${NORMAL_C} -I$S/contrib/pf"
contrib/altq/altq/altq_cdnr.c	optional altq
contrib/altq/altq/altq_hfsc.c	optional altq				\
	compile-with	"${NORMAL_C} -I$S/contrib/pf"
contrib/altq/altq/altq_priq.c	optional altq				\
	compile-with	"${NORMAL_C} -I$S/contrib/pf"
contrib/altq/altq/altq_red.c	optional altq				\
	compile-with	"${NORMAL_C} -I$S/contrib/pf"
contrib/altq/altq/altq_rio.c	optional altq				\
	compile-with	"${NORMAL_C} -I$S/contrib/pf"
contrib/altq/altq/altq_rmclass.c optional altq
contrib/altq/altq/altq_subr.c	optional altq				\
	compile-with	"${NORMAL_C} -I$S/contrib/pf"
contrib/dev/acpica/dbcmds.c	optional acpi acpi_debug
contrib/dev/acpica/dbdisply.c	optional acpi acpi_debug
contrib/dev/acpica/dbexec.c	optional acpi acpi_debug
contrib/dev/acpica/dbfileio.c	optional acpi acpi_debug
contrib/dev/acpica/dbhistry.c	optional acpi acpi_debug
contrib/dev/acpica/dbinput.c	optional acpi acpi_debug
contrib/dev/acpica/dbstats.c	optional acpi acpi_debug
contrib/dev/acpica/dbutils.c	optional acpi acpi_debug
contrib/dev/acpica/dbxface.c	optional acpi acpi_debug
contrib/dev/acpica/dmbuffer.c	optional acpi acpi_debug
contrib/dev/acpica/dmnames.c	optional acpi acpi_debug
contrib/dev/acpica/dmopcode.c	optional acpi acpi_debug
contrib/dev/acpica/dmobject.c	optional acpi acpi_debug
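#
# Illustration only (not a real entry): a hypothetical generated header
# "foo_fw.h" for a hypothetical "foo" device would follow the same pattern
# as the generated-file entries above.  Note that each quoted compile-with
# or dependency string stays on one physical line, since backslash-newline
# is not honored inside strings, and that every prerequisite sits in the
# single dependency line, since later dependency lines are silently ignored:
#
#foo_fw.h			optional foo				\
#	dependency	"$S/tools/foo2h.awk $S/dev/foo/foo_fw"		\
#	compile-with	"${AWK} -f $S/tools/foo2h.awk $S/dev/foo/foo_fw" \
#	no-obj no-implicit-rule before-depend				\
#	clean		"foo_fw.h"
#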
contrib/dev/acpica/dmresrc.c	optional acpi acpi_debug
contrib/dev/acpica/dmresrcl.c	optional acpi acpi_debug
contrib/dev/acpica/dmresrcs.c	optional acpi acpi_debug
contrib/dev/acpica/dmutils.c	optional acpi acpi_debug
contrib/dev/acpica/dmwalk.c	optional acpi acpi_debug
contrib/dev/acpica/dsfield.c	optional acpi
contrib/dev/acpica/dsinit.c	optional acpi
contrib/dev/acpica/dsmethod.c	optional acpi
contrib/dev/acpica/dsmthdat.c	optional acpi
contrib/dev/acpica/dsobject.c	optional acpi
contrib/dev/acpica/dsopcode.c	optional acpi
contrib/dev/acpica/dsutils.c	optional acpi
contrib/dev/acpica/dswexec.c	optional acpi
contrib/dev/acpica/dswload.c	optional acpi
contrib/dev/acpica/dswscope.c	optional acpi
contrib/dev/acpica/dswstate.c	optional acpi
contrib/dev/acpica/evevent.c	optional acpi
contrib/dev/acpica/evgpe.c	optional acpi
contrib/dev/acpica/evgpeblk.c	optional acpi
contrib/dev/acpica/evmisc.c	optional acpi
contrib/dev/acpica/evregion.c	optional acpi
contrib/dev/acpica/evrgnini.c	optional acpi
contrib/dev/acpica/evsci.c	optional acpi
contrib/dev/acpica/evxface.c	optional acpi
contrib/dev/acpica/evxfevnt.c	optional acpi
contrib/dev/acpica/evxfregn.c	optional acpi
contrib/dev/acpica/exconfig.c	optional acpi
contrib/dev/acpica/exconvrt.c	optional acpi
contrib/dev/acpica/excreate.c	optional acpi
contrib/dev/acpica/exdump.c	optional acpi
contrib/dev/acpica/exfield.c	optional acpi
contrib/dev/acpica/exfldio.c	optional acpi
contrib/dev/acpica/exmisc.c	optional acpi
contrib/dev/acpica/exmutex.c	optional acpi
contrib/dev/acpica/exnames.c	optional acpi
contrib/dev/acpica/exoparg1.c	optional acpi
contrib/dev/acpica/exoparg2.c	optional acpi
contrib/dev/acpica/exoparg3.c	optional acpi
contrib/dev/acpica/exoparg6.c	optional acpi
contrib/dev/acpica/exprep.c	optional acpi
contrib/dev/acpica/exregion.c	optional acpi
contrib/dev/acpica/exresnte.c	optional acpi
contrib/dev/acpica/exresolv.c	optional acpi
contrib/dev/acpica/exresop.c	optional acpi
contrib/dev/acpica/exstore.c	optional acpi
contrib/dev/acpica/exstoren.c	optional acpi
contrib/dev/acpica/exstorob.c	optional acpi
contrib/dev/acpica/exsystem.c	optional acpi
contrib/dev/acpica/exutils.c	optional acpi
contrib/dev/acpica/hwacpi.c	optional acpi
contrib/dev/acpica/hwgpe.c	optional acpi
contrib/dev/acpica/hwregs.c	optional acpi
contrib/dev/acpica/hwsleep.c	optional acpi
contrib/dev/acpica/hwtimer.c	optional acpi
contrib/dev/acpica/nsaccess.c	optional acpi
contrib/dev/acpica/nsalloc.c	optional acpi
contrib/dev/acpica/nsdump.c	optional acpi
contrib/dev/acpica/nseval.c	optional acpi
contrib/dev/acpica/nsinit.c	optional acpi
contrib/dev/acpica/nsload.c	optional acpi
contrib/dev/acpica/nsnames.c	optional acpi
contrib/dev/acpica/nsobject.c	optional acpi
contrib/dev/acpica/nsparse.c	optional acpi
contrib/dev/acpica/nssearch.c	optional acpi
contrib/dev/acpica/nsutils.c	optional acpi
contrib/dev/acpica/nswalk.c	optional acpi
contrib/dev/acpica/nsxfeval.c	optional acpi
contrib/dev/acpica/nsxfname.c	optional acpi
contrib/dev/acpica/nsxfobj.c	optional acpi
contrib/dev/acpica/psargs.c	optional acpi
contrib/dev/acpica/psloop.c	optional acpi
contrib/dev/acpica/psopcode.c	optional acpi
contrib/dev/acpica/psparse.c	optional acpi
contrib/dev/acpica/psscope.c	optional acpi
contrib/dev/acpica/pstree.c	optional acpi
contrib/dev/acpica/psutils.c	optional acpi
contrib/dev/acpica/pswalk.c	optional acpi
contrib/dev/acpica/psxface.c	optional acpi
contrib/dev/acpica/rsaddr.c	optional acpi
contrib/dev/acpica/rscalc.c	optional acpi
contrib/dev/acpica/rscreate.c	optional acpi
contrib/dev/acpica/rsdump.c optional acpi contrib/dev/acpica/rsinfo.c optional acpi contrib/dev/acpica/rsio.c optional acpi contrib/dev/acpica/rsirq.c optional acpi contrib/dev/acpica/rslist.c optional acpi contrib/dev/acpica/rsmemory.c optional acpi contrib/dev/acpica/rsmisc.c optional acpi contrib/dev/acpica/rsutils.c optional acpi contrib/dev/acpica/rsxface.c optional acpi contrib/dev/acpica/tbfadt.c optional acpi contrib/dev/acpica/tbfind.c optional acpi contrib/dev/acpica/tbinstal.c optional acpi contrib/dev/acpica/tbutils.c optional acpi contrib/dev/acpica/tbxface.c optional acpi contrib/dev/acpica/tbxfroot.c optional acpi contrib/dev/acpica/utalloc.c optional acpi contrib/dev/acpica/utcache.c optional acpi contrib/dev/acpica/utclib.c optional acpi contrib/dev/acpica/utcopy.c optional acpi contrib/dev/acpica/utdebug.c optional acpi contrib/dev/acpica/utdelete.c optional acpi contrib/dev/acpica/uteval.c optional acpi contrib/dev/acpica/utglobal.c optional acpi contrib/dev/acpica/utinit.c optional acpi contrib/dev/acpica/utmath.c optional acpi contrib/dev/acpica/utmisc.c optional acpi contrib/dev/acpica/utmutex.c optional acpi contrib/dev/acpica/utobject.c optional acpi contrib/dev/acpica/utresrc.c optional acpi contrib/dev/acpica/utstate.c optional acpi contrib/dev/acpica/utxface.c optional acpi contrib/ipfilter/netinet/fil.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_auth.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_fil_freebsd.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_frag.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_log.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_nat.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_proxy.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_state.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_lookup.c optional ipfilter inet \ compile-with "${NORMAL_C} -Wno-error -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_pool.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_htable.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/ip_sync.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ipfilter/netinet/mlfk_ipl.c optional ipfilter inet \ compile-with "${NORMAL_C} -I$S/contrib/ipfilter" contrib/ngatm/netnatm/api/cc_conn.c optional ngatm_ccatm \ compile-with "${NORMAL_C_NOWERROR} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_data.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_dump.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_port.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_sig.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/cc_user.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/api/unisap.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/straddr.c optional 
ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/misc/unimsg_common.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/traffic.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_ie.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/msg/uni_msg.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscfu.c optional ngatm_sscfu \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/saal/saal_sscop.c optional ngatm_sscop \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_call.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_coord.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_party.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_print.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_reset.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_uni.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_unimsgcpy.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_verify.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/pf/net/if_pflog.c optional pflog \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/if_pfsync.c optional pfsync \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_if.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_ioctl.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_norm.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_osfp.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_ruleset.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_subr.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/net/pf_table.c optional pf \ compile-with "${NORMAL_C} -I$S/contrib/pf" contrib/pf/netinet/in4_cksum.c optional pf inet crypto/blowfish/bf_ecb.c optional ipsec crypto/blowfish/bf_skey.c optional crypto | ipsec crypto/camellia/camellia.c optional crypto | ipsec crypto/camellia/camellia-api.c optional crypto | ipsec crypto/des/des_ecb.c optional crypto | ipsec | netsmb crypto/des/des_setkey.c optional crypto | ipsec | netsmb crypto/rc4/rc4.c optional netgraph_mppc_encryption crypto/rijndael/rijndael-alg-fst.c optional crypto | geom_bde | \ ipsec | random | wlan_ccmp crypto/rijndael/rijndael-api-fst.c optional geom_bde | random crypto/rijndael/rijndael-api.c optional crypto | ipsec | wlan_ccmp crypto/sha1.c optional carp | crypto | ipsec | \ netgraph_mppc_encryption | sctp crypto/sha2/sha2.c optional crypto | geom_bde | ipsec | random | \ sctp ddb/db_access.c optional ddb ddb/db_break.c optional ddb ddb/db_capture.c optional ddb ddb/db_command.c optional ddb ddb/db_examine.c optional ddb ddb/db_expr.c optional ddb ddb/db_input.c optional ddb ddb/db_lex.c optional ddb ddb/db_main.c optional ddb ddb/db_output.c optional ddb ddb/db_print.c optional ddb ddb/db_ps.c optional ddb ddb/db_run.c optional ddb ddb/db_script.c optional 
ddb ddb/db_sym.c optional ddb ddb/db_thread.c optional ddb ddb/db_textdump.c optional ddb ddb/db_variables.c optional ddb ddb/db_watch.c optional ddb ddb/db_write_cmd.c optional ddb #dev/dpt/dpt_control.c optional dpt dev/aac/aac.c optional aac dev/aac/aac_cam.c optional aacp aac dev/aac/aac_debug.c optional aac dev/aac/aac_disk.c optional aac dev/aac/aac_linux.c optional aac compat_linux dev/aac/aac_pci.c optional aac pci dev/acpi_support/acpi_aiboost.c optional acpi_aiboost acpi dev/acpi_support/acpi_asus.c optional acpi_asus acpi dev/acpi_support/acpi_fujitsu.c optional acpi_fujitsu acpi dev/acpi_support/acpi_ibm.c optional acpi_ibm acpi dev/acpi_support/acpi_panasonic.c optional acpi_panasonic acpi dev/acpi_support/acpi_sony.c optional acpi_sony acpi dev/acpi_support/acpi_toshiba.c optional acpi_toshiba acpi dev/acpica/Osd/OsdDebug.c optional acpi dev/acpica/Osd/OsdHardware.c optional acpi dev/acpica/Osd/OsdInterrupt.c optional acpi dev/acpica/Osd/OsdMemory.c optional acpi dev/acpica/Osd/OsdSchedule.c optional acpi dev/acpica/Osd/OsdStream.c optional acpi dev/acpica/Osd/OsdSynch.c optional acpi dev/acpica/Osd/OsdTable.c optional acpi dev/acpica/acpi.c optional acpi dev/acpica/acpi_acad.c optional acpi dev/acpica/acpi_battery.c optional acpi dev/acpica/acpi_button.c optional acpi dev/acpica/acpi_cmbat.c optional acpi dev/acpica/acpi_cpu.c optional acpi dev/acpica/acpi_ec.c optional acpi dev/acpica/acpi_hpet.c optional acpi dev/acpica/acpi_isab.c optional acpi isa dev/acpica/acpi_lid.c optional acpi dev/acpica/acpi_package.c optional acpi dev/acpica/acpi_pci.c optional acpi pci dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pcib_acpi.c optional acpi pci dev/acpica/acpi_pcib_pci.c optional acpi pci dev/acpica/acpi_perf.c optional acpi dev/acpica/acpi_powerres.c optional acpi dev/acpica/acpi_quirk.c optional acpi dev/acpica/acpi_resource.c optional acpi dev/acpica/acpi_smbat.c optional acpi dev/acpica/acpi_thermal.c optional acpi dev/acpica/acpi_throttle.c optional acpi dev/acpica/acpi_timer.c optional acpi dev/acpica/acpi_video.c optional acpi_video acpi dev/acpica/acpi_dock.c optional acpi_dock acpi dev/adlink/adlink.c optional adlink dev/advansys/adv_eisa.c optional adv eisa dev/advansys/adv_pci.c optional adv pci dev/advansys/advansys.c optional adv dev/advansys/advlib.c optional adv dev/advansys/advmcode.c optional adv dev/advansys/adw_pci.c optional adw pci dev/advansys/adwcam.c optional adw dev/advansys/adwlib.c optional adw dev/advansys/adwmcode.c optional adw dev/agp/agp.c optional agp pci dev/agp/agp_if.m optional agp pci dev/aha/aha.c optional aha dev/aha/aha_isa.c optional aha isa dev/aha/aha_mca.c optional aha mca dev/ahb/ahb.c optional ahb eisa dev/aic/aic.c optional aic dev/aic/aic_pccard.c optional aic pccard dev/aic7xxx/ahc_eisa.c optional ahc eisa dev/aic7xxx/ahc_isa.c optional ahc isa dev/aic7xxx/ahc_pci.c optional ahc pci dev/aic7xxx/ahd_pci.c optional ahd pci dev/aic7xxx/aic7770.c optional ahc dev/aic7xxx/aic79xx.c optional ahd pci dev/aic7xxx/aic79xx_osm.c optional ahd pci dev/aic7xxx/aic79xx_pci.c optional ahd pci dev/aic7xxx/aic7xxx.c optional ahc dev/aic7xxx/aic7xxx_93cx6.c optional ahc dev/aic7xxx/aic7xxx_osm.c optional ahc dev/aic7xxx/aic7xxx_pci.c optional ahc pci dev/amd/amd.c optional amd dev/amr/amr.c optional amr dev/amr/amr_cam.c optional amr dev/amr/amr_disk.c optional amr dev/amr/amr_linux.c optional amr compat_linux dev/amr/amr_pci.c optional amr pci dev/an/if_an.c optional an dev/an/if_an_isa.c 
optional an isa dev/an/if_an_pccard.c optional an pccard dev/an/if_an_pci.c optional an pci dev/asr/asr.c optional asr pci dev/ata/ata_if.m optional ata dev/ata/ata-all.c optional ata dev/ata/ata-card.c optional ata pccard dev/ata/ata-cbus.c optional ata pc98 dev/ata/ata-chipset.c optional ata pci dev/ata/ata-disk.c optional atadisk dev/ata/ata-dma.c optional ata pci dev/ata/ata-isa.c optional ata isa dev/ata/ata-lowlevel.c optional ata dev/ata/ata-pci.c optional ata pci dev/ata/ata-queue.c optional ata dev/ata/ata-raid.c optional ataraid dev/ata/ata-usb.c optional atausb dev/ata/atapi-cam.c optional atapicam dev/ata/atapi-cd.c optional atapicd dev/ata/atapi-fd.c optional atapifd dev/ata/atapi-tape.c optional atapist dev/ath/ah_osdep.c optional ath_hal \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_rate/amrr/amrr.c optional ath_rate_amrr dev/ath/ath_rate/onoe/onoe.c optional ath_rate_onoe \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/ath_rate/sample/sample.c optional ath_rate_sample \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath.c optional ath \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/ath/if_ath_pci.c optional ath pci \ compile-with "${NORMAL_C} -I$S/dev/ath" dev/awi/am79c930.c optional awi dev/awi/awi.c optional awi dev/awi/if_awi_pccard.c optional awi pccard dev/bce/if_bce.c optional bce dev/bfe/if_bfe.c optional bfe dev/bge/if_bge.c optional bge dev/bktr/bktr_audio.c optional bktr pci dev/bktr/bktr_card.c optional bktr pci dev/bktr/bktr_core.c optional bktr pci dev/bktr/bktr_i2c.c optional bktr pci smbus dev/bktr/bktr_os.c optional bktr pci dev/bktr/bktr_tuner.c optional bktr pci dev/bktr/msp34xx.c optional bktr pci dev/buslogic/bt.c optional bt dev/buslogic/bt_eisa.c optional bt eisa dev/buslogic/bt_isa.c optional bt isa dev/buslogic/bt_mca.c optional bt mca dev/buslogic/bt_pci.c optional bt pci dev/cardbus/cardbus.c optional cardbus dev/cardbus/cardbus_cis.c optional cardbus dev/cardbus/cardbus_device.c optional cardbus dev/ciss/ciss.c optional ciss dev/cm/smc90cx6.c optional cm dev/cnw/if_cnw.c optional cnw pccard dev/cpufreq/ichss.c optional cpufreq dev/cs/if_cs.c optional cs dev/cs/if_cs_isa.c optional cs isa dev/cs/if_cs_pccard.c optional cs pccard dev/cxgb/cxgb_main.c optional cxgb pci dev/cxgb/cxgb_offload.c optional cxgb pci dev/cxgb/cxgb_l2t.c optional cxgb pci dev/cxgb/cxgb_lro.c optional cxgb pci dev/cxgb/cxgb_sge.c optional cxgb pci dev/cxgb/common/cxgb_mc5.c optional cxgb pci dev/cxgb/common/cxgb_vsc7323.c optional cxgb pci dev/cxgb/common/cxgb_vsc8211.c optional cxgb pci dev/cxgb/common/cxgb_ael1002.c optional cxgb pci dev/cxgb/common/cxgb_mv88e1xxx.c optional cxgb pci dev/cxgb/common/cxgb_xgmac.c optional cxgb pci dev/cxgb/common/cxgb_t3_hw.c optional cxgb pci dev/cxgb/sys/uipc_mvec.c optional cxgb pci dev/cxgb/sys/cxgb_support.c optional cxgb pci dev/cy/cy.c optional cy dev/cy/cy_isa.c optional cy isa dev/cy/cy_pci.c optional cy pci dev/dc/if_dc.c optional dc pci dev/dc/dcphy.c optional dc pci dev/dc/pnphy.c optional dc pci dev/dcons/dcons.c optional dcons dev/dcons/dcons_crom.c optional dcons_crom dev/dcons/dcons_os.c optional dcons dev/de/if_de.c optional de pci dev/digi/CX.c optional digi_CX dev/digi/CX_PCI.c optional digi_CX_PCI dev/digi/EPCX.c optional digi_EPCX dev/digi/EPCX_PCI.c optional digi_EPCX_PCI dev/digi/Xe.c optional digi_Xe dev/digi/Xem.c optional digi_Xem dev/digi/Xr.c optional digi_Xr dev/digi/digi.c optional digi dev/digi/digi_isa.c optional digi isa dev/digi/digi_pci.c optional digi pci dev/dpt/dpt_eisa.c optional dpt eisa 
dev/dpt/dpt_pci.c optional dpt pci dev/dpt/dpt_scsi.c optional dpt dev/drm/ati_pcigart.c optional drm dev/drm/drm_agpsupport.c optional drm dev/drm/drm_auth.c optional drm dev/drm/drm_bufs.c optional drm dev/drm/drm_context.c optional drm dev/drm/drm_dma.c optional drm dev/drm/drm_drawable.c optional drm dev/drm/drm_drv.c optional drm dev/drm/drm_fops.c optional drm dev/drm/drm_ioctl.c optional drm dev/drm/drm_irq.c optional drm dev/drm/drm_lock.c optional drm dev/drm/drm_memory.c optional drm dev/drm/drm_pci.c optional drm dev/drm/drm_scatter.c optional drm dev/drm/drm_sysctl.c optional drm dev/drm/drm_vm.c optional drm dev/drm/i915_dma.c optional i915drm dev/drm/i915_drv.c optional i915drm dev/drm/i915_irq.c optional i915drm dev/drm/i915_mem.c optional i915drm dev/drm/mach64_dma.c optional mach64drm dev/drm/mach64_drv.c optional mach64drm dev/drm/mach64_irq.c optional mach64drm dev/drm/mach64_state.c optional mach64drm dev/drm/mga_dma.c optional mgadrm dev/drm/mga_drv.c optional mgadrm dev/drm/mga_irq.c optional mgadrm dev/drm/mga_state.c optional mgadrm \ compile-with "${NORMAL_C} -finline-limit=13500" dev/drm/mga_warp.c optional mgadrm dev/drm/r128_cce.c optional r128drm dev/drm/r128_drv.c optional r128drm dev/drm/r128_irq.c optional r128drm dev/drm/r128_state.c optional r128drm \ compile-with "${NORMAL_C} -finline-limit=13500" dev/drm/r300_cmdbuf.c optional radeondrm dev/drm/radeon_cp.c optional radeondrm dev/drm/radeon_drv.c optional radeondrm dev/drm/radeon_irq.c optional radeondrm dev/drm/radeon_mem.c optional radeondrm dev/drm/radeon_state.c optional radeondrm dev/drm/savage_bci.c optional savagedrm dev/drm/savage_drv.c optional savagedrm dev/drm/savage_state.c optional savagedrm dev/drm/sis_drv.c optional sisdrm dev/drm/sis_ds.c optional sisdrm dev/drm/sis_mm.c optional sisdrm dev/drm/tdfx_drv.c optional tdfxdrm dev/ed/if_ed.c optional ed dev/ed/if_ed_novell.c optional ed dev/ed/if_ed_rtl80x9.c optional ed dev/ed/if_ed_pccard.c optional ed pccard dev/ed/if_ed_pci.c optional ed pci dev/eisa/eisa_if.m standard dev/eisa/eisaconf.c optional eisa dev/em/if_em.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_80003es2lan.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_82540.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_82541.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_82542.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_82543.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_82571.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_82575.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_api.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_ich8lan.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_mac.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_manage.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_nvm.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/em/e1000_phy.c optional em \ compile-with "${NORMAL_C} -I$S/dev/em" dev/en/if_en_pci.c optional en pci dev/en/midway.c optional en dev/ep/if_ep.c optional ep dev/ep/if_ep_eisa.c optional ep eisa dev/ep/if_ep_isa.c optional ep isa dev/ep/if_ep_mca.c optional ep mca dev/ep/if_ep_pccard.c optional ep pccard dev/esp/ncr53c9x.c optional esp dev/ex/if_ex.c optional ex dev/ex/if_ex_isa.c optional ex isa dev/ex/if_ex_pccard.c optional ex pccard dev/exca/exca.c optional cbb dev/fatm/if_fatm.c optional fatm 
pci dev/fb/splash.c optional splash dev/fe/if_fe.c optional fe dev/fe/if_fe_pccard.c optional fe pccard dev/firewire/firewire.c optional firewire dev/firewire/fwcrom.c optional firewire dev/firewire/fwdev.c optional firewire dev/firewire/fwdma.c optional firewire dev/firewire/fwmem.c optional firewire dev/firewire/fwohci.c optional firewire dev/firewire/fwohci_pci.c optional firewire pci dev/firewire/if_fwe.c optional fwe dev/firewire/if_fwip.c optional fwip dev/firewire/sbp.c optional sbp dev/firewire/sbp_targ.c optional sbp_targ dev/flash/at45d.c optional at45d dev/fxp/if_fxp.c optional fxp dev/gem/if_gem.c optional gem dev/gem/if_gem_pci.c optional gem pci #dev/harp/if_harp.c optional harp pci dev/hatm/if_hatm.c optional hatm pci dev/hatm/if_hatm_intr.c optional hatm pci dev/hatm/if_hatm_ioctl.c optional hatm pci dev/hatm/if_hatm_rx.c optional hatm pci dev/hatm/if_hatm_tx.c optional hatm pci #dev/hfa/fore_buffer.c optional hfa #dev/hfa/fore_command.c optional hfa #dev/hfa/fore_globals.c optional hfa #dev/hfa/fore_if.c optional hfa #dev/hfa/fore_init.c optional hfa #dev/hfa/fore_intr.c optional hfa #dev/hfa/fore_output.c optional hfa #dev/hfa/fore_receive.c optional hfa #dev/hfa/fore_stats.c optional hfa #dev/hfa/fore_timer.c optional hfa #dev/hfa/fore_transmit.c optional hfa #dev/hfa/fore_vcm.c optional hfa ##dev/hfa/hfa_eisa.c optional hfa eisa #dev/hfa/hfa_freebsd.c optional hfa #dev/hfa/hfa_pci.c optional hfa pci ##dev/hfa/hfa_sbus.c optional hfa sbus dev/hifn/hifn7751.c optional hifn dev/hme/if_hme.c optional hme dev/hme/if_hme_pci.c optional hme pci dev/hme/if_hme_sbus.c optional hme sbus dev/hptiop/hptiop.c optional hptiop cam dev/hwpmc/hwpmc_logging.c optional hwpmc dev/hwpmc/hwpmc_mod.c optional hwpmc dev/ichsmb/ichsmb.c optional ichsmb dev/ichsmb/ichsmb_pci.c optional ichsmb pci dev/ida/ida.c optional ida dev/ida/ida_disk.c optional ida dev/ida/ida_eisa.c optional ida eisa dev/ida/ida_pci.c optional ida pci dev/ie/if_ie.c optional ie isa nowerror dev/ie/if_ie_isa.c optional ie isa dev/ieee488/ibfoo.c optional pcii | tnt4882 dev/ieee488/pcii.c optional pcii dev/ieee488/tnt4882.c optional tnt4882 dev/ieee488/upd7210.c optional pcii | tnt4882 dev/iicbus/ad7418.c optional ad7418 dev/iicbus/ds1672.c optional ds1672 dev/iicbus/icee.c optional icee dev/iicbus/if_ic.c optional ic dev/iicbus/iic.c optional iic dev/iicbus/iicbb.c optional iicbb dev/iicbus/iicbb_if.m optional iicbb dev/iicbus/iicbus.c optional iicbus dev/iicbus/iicbus_if.m optional iicbus dev/iicbus/iiconf.c optional iicbus dev/iicbus/iicsmb.c optional iicsmb \ dependency "iicbus_if.h" dev/iir/iir.c optional iir dev/iir/iir_ctrl.c optional iir dev/iir/iir_pci.c optional iir pci dev/ips/ips.c optional ips dev/ips/ips_commands.c optional ips dev/ips/ips_disk.c optional ips dev/ips/ips_ioctl.c optional ips dev/ips/ips_pci.c optional ips pci dev/ipw/if_ipw.c optional ipw dev/iscsi/initiator/iscsi.c optional iscsi_initiator scbus dev/iscsi/initiator/iscsi_subr.c optional iscsi_initiator scbus dev/iscsi/initiator/isc_cam.c optional iscsi_initiator scbus dev/iscsi/initiator/isc_soc.c optional iscsi_initiator scbus dev/iscsi/initiator/isc_sm.c optional iscsi_initiator scbus dev/iscsi/initiator/isc_subr.c optional iscsi_initiator scbus dev/isp/isp.c optional isp dev/isp/isp_freebsd.c optional isp dev/isp/isp_library.c optional isp dev/isp/isp_pci.c optional isp pci dev/isp/isp_sbus.c optional isp sbus dev/isp/isp_target.c optional isp dev/ispfw/ispfw.c optional ispfw dev/iwi/if_iwi.c optional iwi dev/ixgb/if_ixgb.c optional ixgb 
dev/ixgb/ixgb_ee.c optional ixgb dev/ixgb/ixgb_hw.c optional ixgb dev/ixgbe/ixgbe.c optional ixgbe \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_phy.c optional ixgbe \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_api.c optional ixgbe \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_common.c optional ixgbe \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/ixgbe/ixgbe_82598.c optional ixgbe \ compile-with "${NORMAL_C} -I$S/dev/ixgbe" dev/joy/joy.c optional joy dev/joy/joy_isa.c optional joy isa dev/joy/joy_pccard.c optional joy pccard dev/kbdmux/kbdmux.c optional kbdmux dev/le/am7990.c optional le dev/le/am79900.c optional le dev/le/if_le_pci.c optional le pci dev/le/lance.c optional le dev/led/led.c standard dev/lge/if_lge.c optional lge dev/lmc/if_lmc.c optional lmc dev/mc146818/mc146818.c optional mc146818 dev/mca/mca_bus.c optional mca dev/mcd/mcd.c optional mcd isa nowerror dev/mcd/mcd_isa.c optional mcd isa nowerror dev/md/md.c optional md dev/mem/memdev.c optional mem dev/mfi/mfi.c optional mfi dev/mfi/mfi_debug.c optional mfi dev/mfi/mfi_pci.c optional mfi pci dev/mfi/mfi_disk.c optional mfi dev/mfi/mfi_linux.c optional mfi compat_linux dev/mfi/mfi_cam.c optional mfip scbus dev/mii/acphy.c optional miibus | acphy dev/mii/amphy.c optional miibus | amphy dev/mii/bmtphy.c optional miibus | bmtphy dev/mii/brgphy.c optional miibus | brgphy dev/mii/ciphy.c optional miibus | ciphy dev/mii/e1000phy.c optional miibus | e1000phy # XXX only xl cards? dev/mii/exphy.c optional miibus | exphy dev/mii/gentbi.c optional miibus | gentbi dev/mii/icsphy.c optional miibus | icsphy # XXX only fxp cards? dev/mii/inphy.c optional miibus | inphy dev/mii/ip1000phy.c optional miibus | ip1000phy dev/mii/lxtphy.c optional miibus | lxtphy dev/mii/mii.c optional miibus | mii dev/mii/mii_physubr.c optional miibus | mii dev/mii/miibus_if.m optional miibus | mii dev/mii/mlphy.c optional miibus | mlphy dev/mii/nsgphy.c optional miibus | nsgphy dev/mii/nsphy.c optional miibus | nsphy dev/mii/pnaphy.c optional miibus | pnaphy dev/mii/qsphy.c optional miibus | qsphy dev/mii/rgephy.c optional miibus | rgephy dev/mii/rlphy.c optional miibus | rlphy dev/mii/rlswitch.c optional rlswitch # XXX rue only? 
dev/mii/ruephy.c optional miibus | ruephy dev/mii/tdkphy.c optional miibus | tdkphy dev/mii/tlphy.c optional miibus | tlphy dev/mii/ukphy.c optional miibus | mii dev/mii/ukphy_subr.c optional miibus | mii dev/mii/xmphy.c optional miibus | xmphy dev/mk48txx/mk48txx.c optional mk48txx dev/mlx/mlx.c optional mlx dev/mlx/mlx_disk.c optional mlx dev/mlx/mlx_pci.c optional mlx pci dev/mly/mly.c optional mly dev/mmc/mmc.c optional mmc dev/mmc/mmcbr_if.m standard dev/mmc/mmcbus_if.m standard dev/mmc/mmcsd.c optional mmcsd dev/mpt/mpt.c optional mpt dev/mpt/mpt_cam.c optional mpt dev/mpt/mpt_debug.c optional mpt dev/mpt/mpt_pci.c optional mpt pci dev/mpt/mpt_raid.c optional mpt dev/msk/if_msk.c optional msk dev/mxge/if_mxge.c optional mxge pci dev/mxge/mxge_lro.c optional mxge pci dev/mxge/mxge_eth_z8e.c optional mxge pci dev/mxge/mxge_ethp_z8e.c optional mxge pci dev/my/if_my.c optional my dev/ncv/ncr53c500.c optional ncv dev/ncv/ncr53c500_pccard.c optional ncv pccard dev/nge/if_nge.c optional nge dev/nxge/if_nxge.c optional nxge dev/nxge/xgehal/xgehal-device.c optional nxge dev/nxge/xgehal/xgehal-mm.c optional nxge dev/nxge/xgehal/xge-queue.c optional nxge dev/nxge/xgehal/xgehal-driver.c optional nxge dev/nxge/xgehal/xgehal-ring.c optional nxge dev/nxge/xgehal/xgehal-channel.c optional nxge dev/nxge/xgehal/xgehal-fifo.c optional nxge dev/nxge/xgehal/xgehal-stats.c optional nxge dev/nxge/xgehal/xgehal-config.c optional nxge dev/nxge/xgehal/xgehal-mgmt.c optional nxge dev/nmdm/nmdm.c optional nmdm dev/nsp/nsp.c optional nsp dev/nsp/nsp_pccard.c optional nsp pccard dev/null/null.c standard dev/patm/if_patm.c optional patm pci dev/patm/if_patm_attach.c optional patm pci dev/patm/if_patm_intr.c optional patm pci dev/patm/if_patm_ioctl.c optional patm pci dev/patm/if_patm_rtables.c optional patm pci dev/patm/if_patm_rx.c optional patm pci dev/patm/if_patm_tx.c optional patm pci dev/pbio/pbio.c optional pbio isa dev/pccard/card_if.m standard dev/pccard/pccard.c optional pccard dev/pccard/pccard_cis.c optional pccard dev/pccard/pccard_cis_quirks.c optional pccard dev/pccard/pccard_device.c optional pccard dev/pccard/power_if.m standard dev/pccbb/pccbb.c optional cbb dev/pccbb/pccbb_isa.c optional cbb isa dev/pccbb/pccbb_pci.c optional cbb pci dev/pcf/pcf.c optional pcf dev/pci/eisa_pci.c optional pci eisa dev/pci/fixup_pci.c optional pci dev/pci/hostb_pci.c optional pci dev/pci/ignore_pci.c optional pci dev/pci/isa_pci.c optional pci isa dev/pci/pci.c optional pci dev/pci/pci_if.m standard dev/pci/pci_pci.c optional pci dev/pci/pci_user.c optional pci dev/pci/pcib_if.m standard dev/pci/vga_pci.c optional pci dev/pdq/if_fea.c optional fea eisa dev/pdq/if_fpa.c optional fpa pci dev/pdq/pdq.c optional nowerror fea eisa | fpa pci dev/pdq/pdq_ifsubr.c optional nowerror fea eisa | fpa pci dev/ppbus/if_plip.c optional plip dev/ppbus/immio.c optional vpo dev/ppbus/lpbb.c optional lpbb dev/ppbus/lpt.c optional lpt dev/ppbus/pcfclock.c optional pcfclock dev/ppbus/ppb_1284.c optional ppbus dev/ppbus/ppb_base.c optional ppbus dev/ppbus/ppb_msq.c optional ppbus dev/ppbus/ppbconf.c optional ppbus dev/ppbus/ppbus_if.m optional ppbus dev/ppbus/ppi.c optional ppi dev/ppbus/pps.c optional pps dev/ppbus/vpo.c optional vpo dev/ppbus/vpoio.c optional vpo dev/ppc/ppc.c optional ppc dev/ppc/ppc_acpi.c optional ppc acpi dev/ppc/ppc_isa.c optional ppc isa dev/ppc/ppc_pci.c optional ppc pci dev/ppc/ppc_puc.c optional ppc puc dev/pst/pst-iop.c optional pst dev/pst/pst-pci.c optional pst pci dev/pst/pst-raid.c optional pst 
dev/puc/puc.c optional puc dev/puc/puc_cfg.c optional puc dev/puc/puc_pccard.c optional puc pccard dev/puc/puc_pci.c optional puc pci dev/puc/pucdata.c optional puc pci dev/ral/rt2560.c optional ral dev/ral/rt2661.c optional ral dev/ral/if_ralrate.c optional ral dev/ral/if_ral_pci.c optional ral pci dev/random/harvest.c standard dev/random/hash.c optional random dev/random/probe.c optional random dev/random/randomdev.c optional random dev/random/randomdev_soft.c optional random dev/random/yarrow.c optional random dev/ray/if_ray.c optional ray pccard dev/rc/rc.c optional rc dev/re/if_re.c optional re dev/rndtest/rndtest.c optional rndtest dev/rp/rp.c optional rp dev/rp/rp_isa.c optional rp isa dev/rp/rp_pci.c optional rp pci dev/safe/safe.c optional safe dev/sbsh/if_sbsh.c optional sbsh dev/scc/scc_if.m optional scc dev/scc/scc_bfe_ebus.c optional scc ebus dev/scc/scc_bfe_sbus.c optional scc fhc | scc sbus dev/scc/scc_core.c optional scc dev/scc/scc_dev_sab82532.c optional scc dev/scc/scc_dev_z8530.c optional scc dev/scd/scd.c optional scd isa dev/scd/scd_isa.c optional scd isa dev/si/si.c optional si dev/si/si2_z280.c optional si dev/si/si3_t225.c optional si dev/si/si_eisa.c optional si eisa dev/si/si_isa.c optional si isa dev/si/si_pci.c optional si pci dev/sk/if_sk.c optional sk pci dev/smbus/smb.c optional smb dev/smbus/smbconf.c optional smbus dev/smbus/smbus.c optional smbus dev/smbus/smbus_if.m optional smbus dev/sn/if_sn.c optional sn dev/sn/if_sn_isa.c optional sn isa dev/sn/if_sn_pccard.c optional sn pccard dev/snp/snp.c optional snp dev/sound/clone.c optional sound dev/sound/unit.c optional sound dev/sound/isa/ad1816.c optional snd_ad1816 isa dev/sound/isa/ess.c optional snd_ess isa dev/sound/isa/gusc.c optional snd_gusc isa dev/sound/isa/mss.c optional snd_mss isa dev/sound/isa/sb16.c optional snd_sb16 isa dev/sound/isa/sb8.c optional snd_sb8 isa dev/sound/isa/sbc.c optional snd_sbc isa dev/sound/isa/sndbuf_dma.c optional sound isa dev/sound/pci/als4000.c optional snd_als4000 pci dev/sound/pci/atiixp.c optional snd_atiixp pci #dev/sound/pci/au88x0.c optional snd_au88x0 pci dev/sound/pci/cmi.c optional snd_cmi pci dev/sound/pci/cs4281.c optional snd_cs4281 pci dev/sound/pci/csa.c optional snd_csa pci \ warning "kernel contains GPL contaminated csaimg.h header" dev/sound/pci/csapcm.c optional snd_csa pci dev/sound/pci/ds1.c optional snd_ds1 pci dev/sound/pci/emu10k1.c optional snd_emu10k1 pci \ dependency "emu10k1-alsa%diked.h" \ warning "kernel contains GPL contaminated emu10k1 headers" dev/sound/pci/emu10kx.c optional snd_emu10kx pci \ dependency "emu10k1-alsa%diked.h" \ dependency "p16v-alsa%diked.h" \ dependency "p17v-alsa%diked.h" \ warning "kernel contains GPL contaminated emu10kx headers" dev/sound/pci/emu10kx-pcm.c optional snd_emu10kx pci \ dependency "emu10k1-alsa%diked.h" \ dependency "p16v-alsa%diked.h" \ dependency "p17v-alsa%diked.h" \ warning "kernel contains GPL contaminated emu10kx headers" dev/sound/pci/emu10kx-midi.c optional snd_emu10kx pci \ dependency "emu10k1-alsa%diked.h" \ warning "kernel contains GPL contaminated emu10kx headers" dev/sound/pci/envy24.c optional snd_envy24 pci dev/sound/pci/envy24ht.c optional snd_envy24ht pci dev/sound/pci/es137x.c optional snd_es137x pci dev/sound/pci/fm801.c optional snd_fm801 pci dev/sound/pci/ich.c optional snd_ich pci dev/sound/pci/maestro.c optional snd_maestro pci dev/sound/pci/maestro3.c optional snd_maestro3 pci \ warning "kernel contains GPL contaminated maestro3 headers" dev/sound/pci/neomagic.c optional 
snd_neomagic pci dev/sound/pci/solo.c optional snd_solo pci dev/sound/pci/spicds.c optional snd_spicds pci dev/sound/pci/t4dwave.c optional snd_t4dwave pci dev/sound/pci/via8233.c optional snd_via8233 pci dev/sound/pci/via82c686.c optional snd_via82c686 pci dev/sound/pci/vibes.c optional snd_vibes pci dev/sound/pci/hda/hdac.c optional snd_hda pci dev/sound/pcm/ac97.c optional sound dev/sound/pcm/ac97_if.m optional sound dev/sound/pcm/ac97_patch.c optional sound dev/sound/pcm/buffer.c optional sound dev/sound/pcm/channel.c optional sound dev/sound/pcm/channel_if.m optional sound dev/sound/pcm/dsp.c optional sound dev/sound/pcm/fake.c optional sound dev/sound/pcm/feeder.c optional sound dev/sound/pcm/feeder_fmt.c optional sound dev/sound/pcm/feeder_if.m optional sound dev/sound/pcm/feeder_rate.c optional sound dev/sound/pcm/feeder_volume.c optional sound dev/sound/pcm/mixer.c optional sound dev/sound/pcm/mixer_if.m optional sound dev/sound/pcm/sndstat.c optional sound dev/sound/pcm/sound.c optional sound dev/sound/pcm/vchan.c optional sound #dev/sound/usb/upcm.c optional snd_upcm usb dev/sound/usb/uaudio.c optional snd_uaudio usb dev/sound/usb/uaudio_pcm.c optional snd_uaudio usb dev/sound/midi/midi.c optional sound dev/sound/midi/mpu401.c optional sound dev/sound/midi/mpu_if.m optional sound dev/sound/midi/mpufoi_if.m optional sound dev/sound/midi/sequencer.c optional sound dev/sound/midi/synth_if.m optional sound dev/spibus/spibus.c optional spibus \ dependency "spibus_if.h" dev/spibus/spibus_if.m optional spibus dev/sr/if_sr.c optional sr dev/sr/if_sr_pci.c optional sr pci dev/stg/tmc18c30.c optional stg dev/stg/tmc18c30_isa.c optional stg isa dev/stg/tmc18c30_pccard.c optional stg pccard dev/stg/tmc18c30_pci.c optional stg pci dev/stg/tmc18c30_subr.c optional stg dev/stge/if_stge.c optional stge dev/streams/streams.c optional streams dev/sym/sym_hipd.c optional sym \ dependency "$S/dev/sym/sym_{conf,defs}.h" dev/syscons/blank/blank_saver.c optional blank_saver dev/syscons/daemon/daemon_saver.c optional daemon_saver dev/syscons/dragon/dragon_saver.c optional dragon_saver dev/syscons/fade/fade_saver.c optional fade_saver dev/syscons/fire/fire_saver.c optional fire_saver dev/syscons/green/green_saver.c optional green_saver dev/syscons/logo/logo.c optional logo_saver dev/syscons/logo/logo_saver.c optional logo_saver dev/syscons/rain/rain_saver.c optional rain_saver dev/syscons/schistory.c optional sc dev/syscons/scmouse.c optional sc dev/syscons/scterm-dumb.c optional sc dev/syscons/scterm.c optional sc dev/syscons/scvidctl.c optional sc dev/syscons/snake/snake_saver.c optional snake_saver dev/syscons/star/star_saver.c optional star_saver dev/syscons/syscons.c optional sc dev/syscons/sysmouse.c optional sc dev/syscons/warp/warp_saver.c optional warp_saver dev/tdfx/tdfx_linux.c optional tdfx_linux tdfx compat_linux dev/tdfx/tdfx_pci.c optional tdfx pci dev/ti/if_ti.c optional ti pci dev/trm/trm.c optional trm dev/twa/tw_cl_init.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_intr.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_io.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_cl_misc.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_cam.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twa/tw_osl_freebsd.c optional twa \ compile-with "${NORMAL_C} -I$S/dev/twa" dev/twe/twe.c optional twe dev/twe/twe_freebsd.c optional twe dev/tx/if_tx.c optional tx dev/txp/if_txp.c optional txp 
dev/uart/uart_bus_acpi.c optional uart acpi #dev/uart/uart_bus_cbus.c optional uart cbus dev/uart/uart_bus_ebus.c optional uart ebus dev/uart/uart_bus_isa.c optional uart isa dev/uart/uart_bus_pccard.c optional uart pccard dev/uart/uart_bus_pci.c optional uart pci dev/uart/uart_bus_puc.c optional uart puc dev/uart/uart_bus_scc.c optional uart scc dev/uart/uart_core.c optional uart dev/uart/uart_dbg.c optional uart gdb dev/uart/uart_dev_ns8250.c optional uart uart_ns8250 dev/uart/uart_dev_sab82532.c optional uart uart_sab82532 dev/uart/uart_dev_sab82532.c optional uart scc dev/uart/uart_dev_z8530.c optional uart uart_z8530 dev/uart/uart_dev_z8530.c optional uart scc dev/uart/uart_if.m optional uart dev/uart/uart_subr.c optional uart dev/uart/uart_tty.c optional uart dev/ubsec/ubsec.c optional ubsec # # USB support dev/usb/ehci.c optional ehci dev/usb/ehci_pci.c optional ehci pci dev/usb/hid.c optional usb dev/usb/if_aue.c optional aue dev/usb/if_axe.c optional axe dev/usb/if_cdce.c optional cdce dev/usb/if_cue.c optional cue dev/usb/if_kue.c optional kue dev/usb/if_ural.c optional ural dev/usb/if_rue.c optional rue dev/usb/if_rum.c optional rum dev/usb/if_udav.c optional udav dev/usb/if_zyd.c optional zyd dev/usb/ohci.c optional ohci dev/usb/ohci_pci.c optional ohci pci dev/usb/sl811hs.c optional slhci dev/usb/slhci_pccard.c optional slhci pccard dev/usb/uark.c optional uark dev/usb/ubsa.c optional ubsa dev/usb/ubser.c optional ubser dev/usb/ucom.c optional ucom dev/usb/ucycom.c optional ucycom dev/usb/udbp.c optional udbp dev/usb/ufoma.c optional ufoma dev/usb/ufm.c optional ufm dev/usb/uftdi.c optional uftdi dev/usb/ugen.c optional ugen dev/usb/uhci.c optional uhci dev/usb/uhci_pci.c optional uhci pci dev/usb/uhid.c optional uhid dev/usb/uhub.c optional usb dev/usb/uipaq.c optional uipaq dev/usb/ukbd.c optional ukbd dev/usb/ulpt.c optional ulpt dev/usb/umass.c optional umass dev/usb/umct.c optional umct dev/usb/umodem.c optional umodem dev/usb/ums.c optional ums dev/usb/uplcom.c optional uplcom dev/usb/urio.c optional urio dev/usb/usb.c optional usb dev/usb/usb_ethersubr.c optional usb dev/usb/usb_if.m optional usb dev/usb/usb_mem.c optional usb dev/usb/usb_quirks.c optional usb dev/usb/usb_subr.c optional usb dev/usb/usbdi.c optional usb dev/usb/usbdi_util.c optional usb dev/usb/uscanner.c optional uscanner dev/usb/uvisor.c optional uvisor dev/usb/uvscom.c optional uvscom dev/utopia/idtphy.c optional utopia dev/utopia/suni.c optional utopia dev/utopia/utopia.c optional utopia dev/vge/if_vge.c optional vge dev/vkbd/vkbd.c optional vkbd dev/vx/if_vx.c optional vx dev/vx/if_vx_eisa.c optional vx eisa dev/vx/if_vx_pci.c optional vx pci dev/watchdog/watchdog.c standard dev/wds/wd7000.c optional wds isa dev/wi/if_wi.c optional wi dev/wi/if_wi_pccard.c optional wi pccard dev/wi/if_wi_pci.c optional wi pci dev/wl/if_wl.c optional wl isa dev/xe/if_xe.c optional xe dev/xe/if_xe_pccard.c optional xe pccard fs/coda/coda_fbsd.c optional vcoda fs/coda/coda_namecache.c optional vcoda fs/coda/coda_psdev.c optional vcoda fs/coda/coda_subr.c optional vcoda fs/coda/coda_venus.c optional vcoda fs/coda/coda_vfsops.c optional vcoda fs/coda/coda_vnops.c optional vcoda fs/deadfs/dead_vnops.c standard fs/devfs/devfs_devs.c standard fs/devfs/devfs_rule.c standard fs/devfs/devfs_vfsops.c standard fs/devfs/devfs_vnops.c standard fs/fdescfs/fdesc_vfsops.c optional fdescfs fs/fdescfs/fdesc_vnops.c optional fdescfs fs/fifofs/fifo_vnops.c standard fs/hpfs/hpfs_alsubr.c optional hpfs fs/hpfs/hpfs_lookup.c optional hpfs 
fs/hpfs/hpfs_subr.c optional hpfs fs/hpfs/hpfs_vfsops.c optional hpfs fs/hpfs/hpfs_vnops.c optional hpfs fs/msdosfs/msdosfs_conv.c optional msdosfs fs/msdosfs/msdosfs_denode.c optional msdosfs fs/msdosfs/msdosfs_fat.c optional msdosfs fs/msdosfs/msdosfs_fileno.c optional msdosfs fs/msdosfs/msdosfs_iconv.c optional msdosfs_iconv fs/msdosfs/msdosfs_lookup.c optional msdosfs fs/msdosfs/msdosfs_vfsops.c optional msdosfs fs/msdosfs/msdosfs_vnops.c optional msdosfs fs/ntfs/ntfs_compr.c optional ntfs fs/ntfs/ntfs_iconv.c optional ntfs_iconv fs/ntfs/ntfs_ihash.c optional ntfs fs/ntfs/ntfs_subr.c optional ntfs fs/ntfs/ntfs_vfsops.c optional ntfs fs/ntfs/ntfs_vnops.c optional ntfs fs/nullfs/null_subr.c optional nullfs fs/nullfs/null_vfsops.c optional nullfs fs/nullfs/null_vnops.c optional nullfs fs/nwfs/nwfs_io.c optional nwfs fs/nwfs/nwfs_ioctl.c optional nwfs fs/nwfs/nwfs_node.c optional nwfs fs/nwfs/nwfs_subr.c optional nwfs fs/nwfs/nwfs_vfsops.c optional nwfs fs/nwfs/nwfs_vnops.c optional nwfs fs/portalfs/portal_vfsops.c optional portalfs fs/portalfs/portal_vnops.c optional portalfs fs/procfs/procfs.c optional procfs fs/procfs/procfs_ctl.c optional procfs fs/procfs/procfs_dbregs.c optional procfs fs/procfs/procfs_fpregs.c optional procfs fs/procfs/procfs_ioctl.c optional procfs fs/procfs/procfs_map.c optional procfs fs/procfs/procfs_mem.c optional procfs fs/procfs/procfs_note.c optional procfs fs/procfs/procfs_regs.c optional procfs fs/procfs/procfs_rlimit.c optional procfs fs/procfs/procfs_status.c optional procfs fs/procfs/procfs_type.c optional procfs fs/pseudofs/pseudofs.c optional pseudofs fs/pseudofs/pseudofs_fileno.c optional pseudofs fs/pseudofs/pseudofs_vncache.c optional pseudofs fs/pseudofs/pseudofs_vnops.c optional pseudofs fs/smbfs/smbfs_io.c optional smbfs fs/smbfs/smbfs_node.c optional smbfs fs/smbfs/smbfs_smb.c optional smbfs fs/smbfs/smbfs_subr.c optional smbfs fs/smbfs/smbfs_vfsops.c optional smbfs fs/smbfs/smbfs_vnops.c optional smbfs fs/udf/osta.c optional udf fs/udf/udf_iconv.c optional udf_iconv fs/udf/udf_vfsops.c optional udf fs/udf/udf_vnops.c optional udf fs/unionfs/union_subr.c optional unionfs fs/unionfs/union_vfsops.c optional unionfs fs/unionfs/union_vnops.c optional unionfs fs/tmpfs/tmpfs_vnops.c optional tmpfs fs/tmpfs/tmpfs_fifoops.c optional tmpfs fs/tmpfs/tmpfs_vfsops.c optional tmpfs fs/tmpfs/tmpfs_subr.c optional tmpfs gdb/gdb_cons.c optional gdb gdb/gdb_main.c optional gdb gdb/gdb_packet.c optional gdb geom/bde/g_bde.c optional geom_bde geom/bde/g_bde_crypt.c optional geom_bde geom/bde/g_bde_lock.c optional geom_bde geom/bde/g_bde_work.c optional geom_bde geom/cache/g_cache.c optional geom_cache geom/concat/g_concat.c optional geom_concat geom/eli/g_eli.c optional geom_eli geom/eli/g_eli_crypto.c optional geom_eli geom/eli/g_eli_ctl.c optional geom_eli geom/eli/g_eli_integrity.c optional geom_eli geom/eli/g_eli_key.c optional geom_eli geom/eli/g_eli_privacy.c optional geom_eli geom/eli/pkcs5v2.c optional geom_eli geom/gate/g_gate.c optional geom_gate geom/geom_aes.c optional geom_aes geom/geom_bsd.c optional geom_bsd geom/geom_bsd_enc.c optional geom_bsd geom/geom_ccd.c optional ccd | geom_ccd geom/geom_ctl.c standard geom/geom_dev.c standard geom/geom_disk.c standard geom/geom_dump.c standard geom/geom_event.c standard geom/geom_fox.c optional geom_fox geom/geom_io.c standard geom/geom_kern.c standard geom/geom_mbr.c optional geom_mbr geom/geom_mbr_enc.c optional geom_mbr geom/geom_pc98.c optional geom_pc98 geom/geom_pc98_enc.c optional geom_pc98 
geom/geom_slice.c		standard
geom/geom_subr.c		standard
geom/geom_sunlabel.c		optional geom_sunlabel
geom/geom_sunlabel_enc.c	optional geom_sunlabel
geom/geom_vfs.c			standard
geom/geom_vol_ffs.c		optional geom_vol
geom/journal/g_journal.c	optional geom_journal
geom/journal/g_journal_ufs.c	optional geom_journal
geom/label/g_label.c		optional geom_label
geom/label/g_label_ext2fs.c	optional geom_label
geom/label/g_label_iso9660.c	optional geom_label
geom/label/g_label_msdosfs.c	optional geom_label
geom/label/g_label_ntfs.c	optional geom_label
geom/label/g_label_reiserfs.c	optional geom_label
geom/label/g_label_ufs.c	optional geom_label
geom/mirror/g_mirror.c		optional geom_mirror
geom/mirror/g_mirror_ctl.c	optional geom_mirror
geom/multipath/g_multipath.c	optional geom_multipath
geom/nop/g_nop.c		optional geom_nop
geom/part/g_part.c		standard
geom/part/g_part_if.m		standard
geom/part/g_part_apm.c		optional geom_part_apm
geom/part/g_part_bsd.c		optional geom_part_bsd
geom/part/g_part_gpt.c		optional geom_part_gpt
geom/part/g_part_mbr.c		optional geom_part_mbr
geom/raid3/g_raid3.c		optional geom_raid3
geom/raid3/g_raid3_ctl.c	optional geom_raid3
geom/shsec/g_shsec.c		optional geom_shsec
geom/stripe/g_stripe.c		optional geom_stripe
geom/uzip/g_uzip.c		optional geom_uzip
geom/virstor/binstream.c	optional geom_virstor
geom/virstor/g_virstor.c	optional geom_virstor
geom/virstor/g_virstor_md.c	optional geom_virstor
geom/zero/g_zero.c		optional geom_zero
gnu/fs/ext2fs/ext2_alloc.c	optional ext2fs				\
	warning "kernel contains GPL contaminated ext2fs filesystem"
gnu/fs/ext2fs/ext2_balloc.c	optional ext2fs
gnu/fs/ext2fs/ext2_bmap.c	optional ext2fs
gnu/fs/ext2fs/ext2_inode.c	optional ext2fs
gnu/fs/ext2fs/ext2_inode_cnv.c	optional ext2fs
gnu/fs/ext2fs/ext2_linux_balloc.c optional ext2fs
gnu/fs/ext2fs/ext2_linux_ialloc.c optional ext2fs
gnu/fs/ext2fs/ext2_lookup.c	optional ext2fs
gnu/fs/ext2fs/ext2_subr.c	optional ext2fs
gnu/fs/ext2fs/ext2_vfsops.c	optional ext2fs
gnu/fs/ext2fs/ext2_vnops.c	optional ext2fs
gnu/fs/reiserfs/reiserfs_hashes.c optional reiserfs			\
	warning "kernel contains GPL contaminated ReiserFS filesystem"
gnu/fs/reiserfs/reiserfs_inode.c optional reiserfs
gnu/fs/reiserfs/reiserfs_item_ops.c optional reiserfs
gnu/fs/reiserfs/reiserfs_namei.c optional reiserfs
gnu/fs/reiserfs/reiserfs_prints.c optional reiserfs
gnu/fs/reiserfs/reiserfs_stree.c optional reiserfs
gnu/fs/reiserfs/reiserfs_vfsops.c optional reiserfs
gnu/fs/reiserfs/reiserfs_vnops.c optional reiserfs
#
# isdn4bsd device drivers
#
i4b/driver/i4b_trace.c		optional i4btrc
i4b/driver/i4b_rbch.c		optional i4brbch
i4b/driver/i4b_tel.c		optional i4btel
#XXXBZ#i4b/driver/i4b_ipr.c	optional i4bipr
net/slcompress.c		optional i4bipr | i4bisppp
i4b/driver/i4b_ctl.c		optional i4bctl
#XXXBZ#i4b/driver/i4b_ing.c	optional i4bing
#XXXBZ#i4b/driver/i4b_isppp.c	optional i4bisppp
#
# isdn4bsd CAPI driver
#
i4b/capi/capi_l4if.c		optional i4bcapi
i4b/capi/capi_llif.c		optional i4bcapi
i4b/capi/capi_msgs.c		optional i4bcapi
#
# isdn4bsd AVM B1/T1 CAPI driver
#
i4b/capi/iavc/iavc_pci.c	optional iavc i4bcapi pci
i4b/capi/iavc/iavc_isa.c	optional iavc i4bcapi isa
i4b/capi/iavc/iavc_lli.c	optional iavc i4bcapi
i4b/capi/iavc/iavc_card.c	optional iavc i4bcapi
#
# isdn4bsd support
#
i4b/layer2/i4b_mbuf.c		optional i4btrc
#
# isdn4bsd Q.921 handler
#
i4b/layer2/i4b_l2.c		optional i4bq921
i4b/layer2/i4b_l2fsm.c		optional i4bq921
i4b/layer2/i4b_uframe.c		optional i4bq921
i4b/layer2/i4b_tei.c		optional i4bq921
i4b/layer2/i4b_sframe.c		optional i4bq921
i4b/layer2/i4b_iframe.c		optional i4bq921
i4b/layer2/i4b_l2timer.c	optional i4bq921
i4b/layer2/i4b_util.c		optional i4bq921
i4b/layer2/i4b_lme.c		optional i4bq921
#
# isdn4bsd Q.931 handler
#
i4b/layer3/i4b_q931.c		optional i4bq931
i4b/layer3/i4b_l3fsm.c		optional i4bq931
i4b/layer3/i4b_l3timer.c	optional i4bq931
i4b/layer3/i4b_l2if.c		optional i4bq931
i4b/layer3/i4b_l4if.c		optional i4bq931
i4b/layer3/i4b_q932fac.c	optional i4bq931
#
# isdn4bsd control device driver, interface to isdnd
#
i4b/layer4/i4b_i4bdrv.c		optional i4b
i4b/layer4/i4b_l4.c		optional i4b
i4b/layer4/i4b_l4mgmt.c		optional i4b
i4b/layer4/i4b_l4timer.c	optional i4b
#
isa/isa_if.m			standard
isa/isa_common.c		optional isa
isa/isahint.c			optional isa
isa/orm.c			optional isa
isa/pnp.c			optional isa isapnp
isa/pnpparse.c			optional isa isapnp
fs/cd9660/cd9660_bmap.c		optional cd9660
fs/cd9660/cd9660_lookup.c	optional cd9660
fs/cd9660/cd9660_node.c		optional cd9660
fs/cd9660/cd9660_rrip.c		optional cd9660
fs/cd9660/cd9660_util.c		optional cd9660
fs/cd9660/cd9660_vfsops.c	optional cd9660
fs/cd9660/cd9660_vnops.c	optional cd9660
fs/cd9660/cd9660_iconv.c	optional cd9660_iconv
kern/bus_if.m			standard
kern/clock_if.m			optional genclock
kern/cpufreq_if.m		standard
kern/device_if.m		standard
kern/imgact_elf.c		standard
kern/imgact_shell.c		standard
kern/inflate.c			optional gzip
kern/init_main.c		standard
kern/init_sysent.c		standard
kern/ksched.c			optional _kposix_priority_scheduling
kern/kern_acct.c		standard
kern/kern_alq.c			optional alq
kern/kern_clock.c		standard
kern/kern_condvar.c		standard
kern/kern_conf.c		standard
kern/kern_cpu.c			standard
kern/kern_context.c		standard
kern/kern_descrip.c		standard
kern/kern_environment.c		standard
kern/kern_event.c		standard
kern/kern_exec.c		standard
kern/kern_exit.c		standard
kern/kern_fork.c		standard
kern/kern_idle.c		standard
kern/kern_intr.c		standard
kern/kern_jail.c		standard
kern/kern_kse.c			standard
kern/kern_kthread.c		standard
kern/kern_ktr.c			optional ktr
kern/kern_ktrace.c		standard
kern/kern_linker.c		standard
kern/kern_lock.c		standard
kern/kern_lockf.c		standard
kern/kern_malloc.c		standard
kern/kern_mbuf.c		standard
kern/kern_mib.c			standard
kern/kern_module.c		standard
kern/kern_mtxpool.c		standard
kern/kern_mutex.c		standard
kern/kern_ntptime.c		standard
kern/kern_physio.c		standard
kern/kern_pmc.c			standard
kern/kern_poll.c		optional device_polling
kern/kern_priv.c		standard
kern/kern_proc.c		standard
kern/kern_prot.c		standard
kern/kern_resource.c		standard
kern/kern_rmlock.c		standard
kern/kern_rwlock.c		standard
kern/kern_sema.c		standard
kern/kern_shutdown.c		standard
kern/kern_sig.c			standard
kern/kern_subr.c		standard
kern/kern_sx.c			standard
kern/kern_synch.c		standard
kern/kern_syscalls.c		standard
kern/kern_sysctl.c		standard
kern/kern_tc.c			standard
kern/kern_thr.c			standard
kern/kern_thread.c		standard
kern/kern_time.c		standard
kern/kern_timeout.c		standard
kern/kern_umtx.c		standard
kern/kern_uuid.c		standard
kern/kern_xxx.c			standard
kern/link_elf.c			standard
kern/linker_if.m		standard
kern/md4c.c			optional netsmb
kern/md5c.c			standard
kern/p1003_1b.c			standard
kern/posix4_mib.c		standard
kern/sched_4bsd.c		optional sched_4bsd
kern/sched_ule.c		optional sched_ule
kern/serdev_if.m		standard
kern/subr_acl_posix1e.c		standard
kern/subr_autoconf.c		standard
kern/subr_blist.c		standard
kern/subr_bus.c			standard
kern/subr_clock.c		standard
kern/subr_devstat.c		standard
kern/subr_disk.c		standard
kern/subr_eventhandler.c	standard
kern/subr_fattime.c		standard
kern/subr_firmware.c		optional firmware
kern/subr_hints.c		standard
kern/subr_kdb.c			standard
kern/subr_kobj.c		standard
kern/subr_lock.c		standard
kern/subr_log.c standard kern/subr_mbpool.c optional libmbpool kern/subr_mchain.c optional libmchain kern/subr_module.c standard kern/subr_msgbuf.c standard kern/subr_param.c standard kern/subr_pcpu.c standard kern/subr_power.c standard kern/subr_prf.c standard kern/subr_prof.c standard kern/subr_rman.c standard kern/subr_rtc.c optional genclock kern/subr_sbuf.c standard kern/subr_scanf.c standard kern/subr_sleepqueue.c standard kern/subr_smp.c standard kern/subr_stack.c optional ddb | stack kern/subr_taskqueue.c standard kern/subr_trap.c standard kern/subr_turnstile.c standard kern/subr_unit.c standard kern/subr_witness.c optional witness kern/sys_generic.c standard kern/sys_pipe.c standard kern/sys_process.c standard kern/sys_socket.c standard kern/syscalls.c optional witness | invariants kern/sysv_ipc.c standard kern/sysv_msg.c optional sysvmsg kern/sysv_sem.c optional sysvsem kern/sysv_shm.c optional sysvshm kern/tty.c standard kern/tty_compat.c optional compat_43tty kern/tty_conf.c standard kern/tty_cons.c standard kern/tty_pts.c optional pty kern/tty_pty.c optional pty kern/tty_subr.c standard kern/tty_tty.c standard kern/uipc_accf.c optional inet kern/uipc_cow.c optional zero_copy_sockets kern/uipc_debug.c optional ddb kern/uipc_domain.c standard kern/uipc_mbuf.c standard kern/uipc_mbuf2.c standard kern/uipc_mqueue.c optional p1003_1b_mqueue kern/uipc_sem.c optional p1003_1b_semaphores kern/uipc_sockbuf.c standard kern/uipc_socket.c standard kern/uipc_syscalls.c standard kern/uipc_usrreq.c standard kern/vfs_acl.c standard kern/vfs_aio.c optional vfs_aio kern/vfs_bio.c standard kern/vfs_cache.c standard kern/vfs_cluster.c standard kern/vfs_default.c standard kern/vfs_export.c standard kern/vfs_extattr.c standard kern/vfs_hash.c standard kern/vfs_init.c standard kern/vfs_lookup.c standard kern/vfs_mount.c standard kern/vfs_subr.c standard kern/vfs_syscalls.c standard kern/vfs_vnops.c standard # # These files in libkern/ are those needed by all architectures. Some # of the files in libkern/ are only needed on some architectures, e.g., # libkern/divdi3.c is needed by i386 but not alpha. Also, some of these # routines may be optimized for a particular platform. In either case, # the file should be moved to conf/files.<arch> from here.
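#
# Each entry in this file pairs a source file with the condition that
# pulls it into the build: "standard" files are always compiled, while
# "optional foo" files are compiled only when the kernel config enables
# option foo; "foo | bar" accepts either option and "foo bar" requires
# both.  Keywords such as compile-with, dependency, warning, and no-obj
# override parts of the default build rule.  The commented-out entry
# below is a hypothetical illustration only; no foo_example option or
# driver exists:
#
#foo/foo_example.c	optional foo_example \
#	compile-with "${NORMAL_C} -I$S/foo"
#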
# libkern/arc4random.c standard libkern/bcd.c standard libkern/bsearch.c standard libkern/crc32.c standard libkern/fnmatch.c standard libkern/gets.c standard libkern/iconv.c optional libiconv libkern/iconv_converter_if.m optional libiconv libkern/iconv_xlat.c optional libiconv libkern/iconv_xlat16.c optional libiconv libkern/index.c standard libkern/inet_ntoa.c standard libkern/mcount.c optional profiling-routine libkern/qsort.c standard libkern/qsort_r.c standard libkern/random.c standard libkern/rindex.c standard libkern/scanc.c standard libkern/skpc.c standard libkern/strcasecmp.c standard libkern/strcat.c standard libkern/strcmp.c standard libkern/strcpy.c standard libkern/strdup.c standard libkern/strlcat.c standard libkern/strlcpy.c standard libkern/strlen.c standard libkern/strncmp.c standard libkern/strncpy.c standard libkern/strsep.c standard libkern/strspn.c standard libkern/strstr.c standard libkern/strtol.c standard libkern/strtoq.c standard libkern/strtoul.c standard libkern/strtouq.c standard libkern/strvalid.c standard net/bpf.c standard net/bpf_jitter.c optional bpf_jitter net/bpf_filter.c optional bpf | netgraph_bpf net/bridgestp.c optional bridge | if_bridge net/bsd_comp.c optional ppp_bsdcomp net/ieee8023ad_lacp.c optional lagg net/if.c standard net/if_arcsubr.c optional arcnet net/if_atmsubr.c optional atm net/if_bridge.c optional bridge | if_bridge net/if_clone.c standard net/if_disc.c optional disc net/if_edsc.c optional edsc net/if_ef.c optional ef net/if_enc.c optional enc net/if_ethersubr.c optional ether \ compile-with "${NORMAL_C} -I$S/contrib/pf" net/if_faith.c optional faith net/if_fddisubr.c optional fddi net/if_fwsubr.c optional fwip net/if_gif.c optional gif net/if_gre.c optional gre net/if_iso88025subr.c optional token net/if_lagg.c optional lagg net/if_loop.c optional loop net/if_media.c standard net/if_mib.c standard net/if_ppp.c optional ppp net/if_sl.c optional sl net/if_spppfr.c optional i4bisppp | sppp | netgraph_sppp net/if_spppsubr.c optional i4bisppp | sppp | netgraph_sppp net/if_stf.c optional stf net/if_tun.c optional tun net/if_tap.c optional tap net/if_vlan.c optional vlan net/mppcc.c optional netgraph_mppc_compression net/mppcd.c optional netgraph_mppc_compression net/netisr.c standard net/ppp_deflate.c optional ppp_deflate net/ppp_tty.c optional ppp net/pfil.c optional ether | inet net/radix.c standard net/raw_cb.c standard net/raw_usrreq.c standard net/route.c standard net/rtsock.c standard net/slcompress.c optional netgraph_vjc | ppp | sl | sppp | \ netgraph_sppp net/zlib.c optional crypto | geom_uzip | ipsec | \ mxge | ppp_deflate | netgraph_deflate net80211/ieee80211.c optional wlan net80211/ieee80211_acl.c optional wlan_acl net80211/ieee80211_amrr.c optional wlan_amrr net80211/ieee80211_crypto.c optional wlan net80211/ieee80211_crypto_ccmp.c optional wlan_ccmp net80211/ieee80211_crypto_none.c optional wlan net80211/ieee80211_crypto_tkip.c optional wlan_tkip net80211/ieee80211_crypto_wep.c optional wlan_wep net80211/ieee80211_freebsd.c optional wlan net80211/ieee80211_ht.c optional wlan net80211/ieee80211_input.c optional wlan net80211/ieee80211_ioctl.c optional wlan net80211/ieee80211_node.c optional wlan net80211/ieee80211_output.c optional wlan net80211/ieee80211_power.c optional wlan net80211/ieee80211_proto.c optional wlan net80211/ieee80211_regdomain.c optional wlan net80211/ieee80211_scan.c optional wlan net80211/ieee80211_scan_ap.c optional wlan_scan_ap net80211/ieee80211_scan_sta.c optional wlan_scan_sta 
net80211/ieee80211_xauth.c optional wlan_xauth netatalk/aarp.c optional netatalk netatalk/at_control.c optional netatalk netatalk/at_proto.c optional netatalk netatalk/at_rmx.c optional netatalkdebug netatalk/ddp_input.c optional netatalk netatalk/ddp_output.c optional netatalk netatalk/ddp_pcb.c optional netatalk netatalk/ddp_usrreq.c optional netatalk # # netatm has been disconnected from the build until it is MPSAFE, or # alternatively, until it is removed. # #netatm/atm_aal5.c optional atm_core #netatm/atm_cm.c optional atm_core #netatm/atm_device.c optional atm_core #netatm/atm_if.c optional atm_core #netatm/atm_proto.c optional atm_core #netatm/atm_signal.c optional atm_core #netatm/atm_socket.c optional atm_core #netatm/atm_subr.c optional atm_core #netatm/atm_usrreq.c optional atm_core #netatm/ipatm/ipatm_event.c optional atm_ip atm_core #netatm/ipatm/ipatm_if.c optional atm_ip atm_core #netatm/ipatm/ipatm_input.c optional atm_ip atm_core #netatm/ipatm/ipatm_load.c optional atm_ip atm_core #netatm/ipatm/ipatm_output.c optional atm_ip atm_core #netatm/ipatm/ipatm_usrreq.c optional atm_ip atm_core #netatm/ipatm/ipatm_vcm.c optional atm_ip atm_core #netatm/sigpvc/sigpvc_if.c optional atm_sigpvc atm_core #netatm/sigpvc/sigpvc_subr.c optional atm_sigpvc atm_core #netatm/spans/spans_arp.c optional atm_spans atm_core \ # dependency "spans_xdr.h" #netatm/spans/spans_cls.c optional atm_spans atm_core #netatm/spans/spans_if.c optional atm_spans atm_core #netatm/spans/spans_kxdr.c optional atm_spans atm_core #netatm/spans/spans_msg.c optional atm_spans atm_core #netatm/spans/spans_print.c optional atm_spans atm_core #netatm/spans/spans_proto.c optional atm_spans atm_core #netatm/spans/spans_subr.c optional atm_spans atm_core #netatm/spans/spans_util.c optional atm_spans atm_core #spans_xdr.h optional atm_spans atm_core \ # before-depend \ # dependency "$S/netatm/spans/spans_xdr.x" \ # compile-with "rpcgen -h -C $S/netatm/spans/spans_xdr.x | grep -v rpc/rpc.h > spans_xdr.h" \ # clean "spans_xdr.h" \ # no-obj no-implicit-rule #spans_xdr.c optional atm_spans atm_core \ # before-depend \ # dependency "$S/netatm/spans/spans_xdr.x" \ # compile-with "rpcgen -c -C $S/netatm/spans/spans_xdr.x | grep -v rpc/rpc.h > spans_xdr.c" \ # clean "spans_xdr.c" \ # no-obj no-implicit-rule local #spans_xdr.o optional atm_spans atm_core \ # dependency "$S/netatm/spans/spans_xdr.x" \ # compile-with "${NORMAL_C}" \ # no-implicit-rule local #netatm/uni/q2110_sigaa.c optional atm_uni atm_core #netatm/uni/q2110_sigcpcs.c optional atm_uni atm_core #netatm/uni/q2110_subr.c optional atm_uni atm_core #netatm/uni/qsaal1_sigaa.c optional atm_uni atm_core #netatm/uni/qsaal1_sigcpcs.c optional atm_uni atm_core #netatm/uni/qsaal1_subr.c optional atm_uni atm_core #netatm/uni/sscf_uni.c optional atm_uni atm_core #netatm/uni/sscf_uni_lower.c optional atm_uni atm_core #netatm/uni/sscf_uni_upper.c optional atm_uni atm_core #netatm/uni/sscop.c optional atm_uni atm_core #netatm/uni/sscop_lower.c optional atm_uni atm_core #netatm/uni/sscop_pdu.c optional atm_uni atm_core #netatm/uni/sscop_sigaa.c optional atm_uni atm_core #netatm/uni/sscop_sigcpcs.c optional atm_uni atm_core #netatm/uni/sscop_subr.c optional atm_uni atm_core #netatm/uni/sscop_timer.c optional atm_uni atm_core #netatm/uni/sscop_upper.c optional atm_uni atm_core #netatm/uni/uni_load.c optional atm_uni atm_core #netatm/uni/uniarp.c optional atm_uni atm_core #netatm/uni/uniarp_cache.c optional atm_uni atm_core #netatm/uni/uniarp_input.c optional atm_uni atm_core 
#netatm/uni/uniarp_output.c optional atm_uni atm_core #netatm/uni/uniarp_timer.c optional atm_uni atm_core #netatm/uni/uniarp_vcm.c optional atm_uni atm_core #netatm/uni/uniip.c optional atm_uni atm_core #netatm/uni/unisig_decode.c optional atm_uni atm_core #netatm/uni/unisig_encode.c optional atm_uni atm_core #netatm/uni/unisig_if.c optional atm_uni atm_core #netatm/uni/unisig_mbuf.c optional atm_uni atm_core #netatm/uni/unisig_msg.c optional atm_uni atm_core #netatm/uni/unisig_print.c optional atm_uni atm_core #netatm/uni/unisig_proto.c optional atm_uni atm_core #netatm/uni/unisig_sigmgr_state.c optional atm_uni atm_core #netatm/uni/unisig_subr.c optional atm_uni atm_core #netatm/uni/unisig_util.c optional atm_uni atm_core #netatm/uni/unisig_vc_state.c optional atm_uni atm_core #netgraph/atm/atmpif/ng_atmpif.c optional netgraph_atm_atmpif #netgraph/atm/atmpif/ng_atmpif_harp.c optional netgraph_atm_atmpif netgraph/atm/ccatm/ng_ccatm.c optional ngatm_ccatm \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/ng_atm.c optional ngatm_atm netgraph/atm/ngatmbase.c optional ngatm_atmbase \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/sscfu/ng_sscfu.c optional ngatm_sscfu \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/sscop/ng_sscop.c optional ngatm_sscop \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/atm/uni/ng_uni.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" netgraph/bluetooth/common/ng_bluetooth.c optional netgraph_bluetooth netgraph/bluetooth/drivers/bt3c/ng_bt3c_pccard.c optional netgraph_bluetooth_bt3c netgraph/bluetooth/drivers/h4/ng_h4.c optional netgraph_bluetooth_h4 netgraph/bluetooth/drivers/ubt/ng_ubt.c optional netgraph_bluetooth_ubt netgraph/bluetooth/drivers/ubtbcmfw/ubtbcmfw.c optional netgraph_bluetooth_ubtbcmfw netgraph/bluetooth/hci/ng_hci_cmds.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_evnt.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_main.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_misc.c optional netgraph_bluetooth_hci netgraph/bluetooth/hci/ng_hci_ulpi.c optional netgraph_bluetooth_hci netgraph/bluetooth/l2cap/ng_l2cap_cmds.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_evnt.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_llpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_main.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_misc.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/l2cap/ng_l2cap_ulpi.c optional netgraph_bluetooth_l2cap netgraph/bluetooth/socket/ng_btsocket.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_hci_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_l2cap_raw.c optional netgraph_bluetooth_socket netgraph/bluetooth/socket/ng_btsocket_rfcomm.c optional netgraph_bluetooth_socket netgraph/netflow/netflow.c optional netgraph_netflow netgraph/netflow/ng_netflow.c optional netgraph_netflow netgraph/ng_UI.c optional netgraph_UI netgraph/ng_async.c optional netgraph_async netgraph/ng_atmllc.c optional netgraph_atmllc netgraph/ng_base.c optional netgraph netgraph/ng_bpf.c optional netgraph_bpf netgraph/ng_bridge.c optional netgraph_bridge netgraph/ng_car.c optional netgraph_car netgraph/ng_cisco.c optional netgraph_cisco netgraph/ng_deflate.c optional netgraph_deflate netgraph/ng_device.c optional 
netgraph_device netgraph/ng_echo.c optional netgraph_echo netgraph/ng_eiface.c optional netgraph_eiface netgraph/ng_ether.c optional netgraph_ether netgraph/ng_fec.c optional netgraph_fec netgraph/ng_frame_relay.c optional netgraph_frame_relay netgraph/ng_gif.c optional netgraph_gif netgraph/ng_gif_demux.c optional netgraph_gif_demux netgraph/ng_hole.c optional netgraph_hole netgraph/ng_iface.c optional netgraph_iface netgraph/ng_ip_input.c optional netgraph_ip_input netgraph/ng_ipfw.c optional netgraph_ipfw netgraph/ng_ksocket.c optional netgraph_ksocket netgraph/ng_l2tp.c optional netgraph_l2tp netgraph/ng_lmi.c optional netgraph_lmi netgraph/ng_mppc.c optional netgraph_mppc_compression | \ netgraph_mppc_encryption netgraph/ng_nat.c optional netgraph_nat netgraph/ng_one2many.c optional netgraph_one2many netgraph/ng_parse.c optional netgraph netgraph/ng_ppp.c optional netgraph_ppp netgraph/ng_pppoe.c optional netgraph_pppoe netgraph/ng_pptpgre.c optional netgraph_pptpgre netgraph/ng_pred1.c optional netgraph_pred1 netgraph/ng_rfc1490.c optional netgraph_rfc1490 netgraph/ng_socket.c optional netgraph_socket netgraph/ng_split.c optional netgraph_split netgraph/ng_sppp.c optional netgraph_sppp netgraph/ng_tag.c optional netgraph_tag netgraph/ng_tcpmss.c optional netgraph_tcpmss netgraph/ng_tee.c optional netgraph_tee netgraph/ng_tty.c optional netgraph_tty netgraph/ng_vjc.c optional netgraph_vjc netinet/accf_data.c optional accept_filter_data netinet/accf_http.c optional accept_filter_http netinet/if_atm.c optional atm netinet/if_ether.c optional ether netinet/igmp.c optional inet netinet/in.c optional inet netinet/ip_carp.c optional carp netinet/in_gif.c optional gif inet netinet/ip_gre.c optional gre inet netinet/ip_id.c optional inet netinet/in_mcast.c optional inet netinet/in_pcb.c optional inet netinet/in_proto.c optional inet \ compile-with "${NORMAL_C} -I$S/contrib/pf" netinet/in_rmx.c optional inet netinet/ip_divert.c optional ipdivert netinet/ip_dummynet.c optional dummynet netinet/ip_ecn.c optional inet | inet6 netinet/ip_encap.c optional inet | inet6 netinet/ip_fastfwd.c optional inet netinet/ip_fw2.c optional ipfirewall \ compile-with "${NORMAL_C} -I$S/contrib/pf" netinet/ip_fw_pfil.c optional ipfirewall netinet/ip_icmp.c optional inet netinet/ip_input.c optional inet netinet/ip_ipsec.c optional ipsec netinet/ip_mroute.c optional mrouting inet | mrouting inet6 netinet/ip_options.c optional inet netinet/ip_output.c optional inet netinet/raw_ip.c optional inet netinet/sctp_asconf.c optional inet inet6 sctp netinet/sctp_auth.c optional inet inet6 sctp netinet/sctp_bsd_addr.c optional inet inet6 sctp netinet/sctp_cc_functions.c optional inet inet6 sctp netinet/sctp_crc32.c optional inet inet6 sctp netinet/sctp_indata.c optional inet inet6 sctp netinet/sctp_input.c optional inet inet6 sctp netinet/sctp_output.c optional inet inet6 sctp netinet/sctp_pcb.c optional inet inet6 sctp netinet/sctp_peeloff.c optional inet inet6 sctp netinet/sctp_sysctl.c optional inet inet6 sctp netinet/sctp_timer.c optional inet inet6 sctp netinet/sctp_usrreq.c optional inet inet6 sctp netinet/sctputil.c optional inet inet6 sctp netinet/tcp_debug.c optional tcpdebug netinet/tcp_hostcache.c optional inet netinet/tcp_input.c optional inet netinet/tcp_output.c optional inet netinet/tcp_offload.c optional inet netinet/tcp_reass.c optional inet netinet/tcp_sack.c optional inet netinet/tcp_subr.c optional inet netinet/tcp_syncache.c optional inet netinet/tcp_timer.c optional inet netinet/tcp_timewait.c optional 
inet netinet/tcp_usrreq.c optional inet netinet/udp_usrreq.c optional inet netinet/libalias/alias.c optional libalias | netgraph_nat netinet/libalias/alias_db.c optional libalias | netgraph_nat netinet/libalias/alias_mod.c optional libalias | netgraph_nat netinet/libalias/alias_proxy.c optional libalias | netgraph_nat netinet/libalias/alias_util.c optional libalias | netgraph_nat netinet6/dest6.c optional inet6 netinet6/frag6.c optional inet6 netinet6/icmp6.c optional inet6 netinet6/in6.c optional inet6 netinet6/in6_cksum.c optional inet6 netinet6/in6_gif.c optional gif inet6 netinet6/in6_ifattach.c optional inet6 netinet6/in6_pcb.c optional inet6 netinet6/in6_proto.c optional inet6 netinet6/in6_rmx.c optional inet6 netinet6/in6_src.c optional inet6 netinet6/ip6_forward.c optional inet6 netinet6/ip6_id.c optional inet6 netinet6/ip6_input.c optional inet6 netinet6/ip6_mroute.c optional mrouting inet6 netinet6/ip6_output.c optional inet6 netinet6/ip6_ipsec.c optional inet6 ipsec netinet6/mld6.c optional inet6 netinet6/nd6.c optional inet6 netinet6/nd6_nbr.c optional inet6 netinet6/nd6_rtr.c optional inet6 netinet6/raw_ip6.c optional inet6 netinet6/route6.c optional inet6 netinet6/scope6.c optional inet6 netinet6/sctp6_usrreq.c optional inet6 sctp netinet6/udp6_usrreq.c optional inet6 netipsec/ipsec.c optional ipsec netipsec/ipsec_input.c optional ipsec netipsec/ipsec_mbuf.c optional ipsec netipsec/ipsec_output.c optional ipsec netipsec/key.c optional ipsec netipsec/key_debug.c optional ipsec netipsec/keysock.c optional ipsec netipsec/xform_ah.c optional ipsec netipsec/xform_esp.c optional ipsec netipsec/xform_ipcomp.c optional ipsec netipsec/xform_ipip.c optional ipsec netipsec/xform_tcp.c optional ipsec tcp_signature netipx/ipx.c optional ipx netipx/ipx_cksum.c optional ipx netipx/ipx_input.c optional ipx netipx/ipx_outputfl.c optional ipx netipx/ipx_pcb.c optional ipx netipx/ipx_proto.c optional ipx netipx/ipx_usrreq.c optional ipx netipx/spx_debug.c optional ipx netipx/spx_usrreq.c optional ipx netnatm/natm.c optional natm netnatm/natm_pcb.c optional natm netnatm/natm_proto.c optional natm netncp/ncp_conn.c optional ncp netncp/ncp_crypt.c optional ncp netncp/ncp_login.c optional ncp netncp/ncp_mod.c optional ncp netncp/ncp_ncp.c optional ncp netncp/ncp_nls.c optional ncp netncp/ncp_rq.c optional ncp netncp/ncp_sock.c optional ncp netncp/ncp_subr.c optional ncp netsmb/smb_conn.c optional netsmb netsmb/smb_crypt.c optional netsmb netsmb/smb_dev.c optional netsmb netsmb/smb_iod.c optional netsmb netsmb/smb_rq.c optional netsmb netsmb/smb_smb.c optional netsmb netsmb/smb_subr.c optional netsmb netsmb/smb_trantcp.c optional netsmb netsmb/smb_usr.c optional netsmb nfs/nfs_common.c optional nfsclient | nfsserver nfs4client/nfs4_dev.c optional nfsclient nfs4client/nfs4_idmap.c optional nfsclient nfs4client/nfs4_socket.c optional nfsclient nfs4client/nfs4_subs.c optional nfsclient nfs4client/nfs4_vfs_subs.c optional nfsclient nfs4client/nfs4_vfsops.c optional nfsclient nfs4client/nfs4_vn_subs.c optional nfsclient nfs4client/nfs4_vnops.c optional nfsclient nfsclient/bootp_subr.c optional bootp nfsclient nfsclient/krpc_subr.c optional bootp nfsclient nfsclient/nfs_bio.c optional nfsclient nfsclient/nfs_diskless.c optional nfsclient nfs_root nfsclient/nfs_node.c optional nfsclient nfsclient/nfs_socket.c optional nfsclient nfsclient/nfs_subs.c optional nfsclient nfsclient/nfs_nfsiod.c optional nfsclient nfsclient/nfs_vfsops.c optional nfsclient nfsclient/nfs_vnops.c optional nfsclient 
nfsclient/nfs_lock.c optional nfsclient nfsserver/nfs_serv.c optional nfsserver nfsserver/nfs_srvsock.c optional nfsserver nfsserver/nfs_srvcache.c optional nfsserver nfsserver/nfs_srvsubs.c optional nfsserver nfsserver/nfs_syscalls.c optional nfsserver # crypto support opencrypto/cast.c optional crypto | ipsec opencrypto/criov.c optional crypto opencrypto/crypto.c optional crypto opencrypto/cryptodev.c optional cryptodev opencrypto/cryptodev_if.m optional crypto opencrypto/cryptosoft.c optional crypto opencrypto/deflate.c optional crypto opencrypto/rmd160.c optional crypto | ipsec opencrypto/skipjack.c optional crypto opencrypto/xform.c optional crypto pci/alpm.c optional alpm pci pci/amdpm.c optional amdpm pci | nfpm pci pci/amdsmb.c optional amdsmb pci pci/if_mn.c optional mn pci pci/if_pcn.c optional pcn pci pci/if_rl.c optional rl pci pci/if_sf.c optional sf pci pci/if_sis.c optional sis pci pci/if_ste.c optional ste pci pci/if_tl.c optional tl pci pci/if_vr.c optional vr pci pci/if_wb.c optional wb pci pci/if_xl.c optional xl pci pci/intpm.c optional intpm pci pci/ncr.c optional ncr pci pci/nfsmb.c optional nfsmb pci pci/viapm.c optional viapm pci rpc/rpcclnt.c optional nfsclient security/audit/audit.c optional audit security/audit/audit_arg.c optional audit security/audit/audit_bsm.c optional audit security/audit/audit_bsm_klib.c optional audit security/audit/audit_bsm_token.c optional audit security/audit/audit_pipe.c optional audit security/audit/audit_syscalls.c standard security/audit/audit_trigger.c optional audit security/audit/audit_worker.c optional audit security/mac/mac_atalk.c optional mac netatalk security/mac/mac_audit.c optional mac audit security/mac/mac_framework.c optional mac security/mac/mac_inet.c optional mac inet security/mac/mac_inet6.c optional mac inet6 security/mac/mac_label.c optional mac security/mac/mac_net.c optional mac security/mac/mac_pipe.c optional mac security/mac/mac_posix_sem.c optional mac security/mac/mac_priv.c optional mac security/mac/mac_process.c optional mac security/mac/mac_socket.c optional mac security/mac/mac_syscalls.c standard security/mac/mac_system.c optional mac security/mac/mac_sysv_msg.c optional mac security/mac/mac_sysv_sem.c optional mac security/mac/mac_sysv_shm.c optional mac security/mac/mac_vfs.c optional mac security/mac_biba/mac_biba.c optional mac_biba security/mac_bsdextended/mac_bsdextended.c optional mac_bsdextended security/mac_ifoff/mac_ifoff.c optional mac_ifoff security/mac_lomac/mac_lomac.c optional mac_lomac security/mac_mls/mac_mls.c optional mac_mls security/mac_none/mac_none.c optional mac_none security/mac_partition/mac_partition.c optional mac_partition security/mac_portacl/mac_portacl.c optional mac_portacl security/mac_seeotheruids/mac_seeotheruids.c optional mac_seeotheruids security/mac_stub/mac_stub.c optional mac_stub security/mac_test/mac_test.c optional mac_test ufs/ffs/ffs_alloc.c optional ffs ufs/ffs/ffs_balloc.c optional ffs ufs/ffs/ffs_inode.c optional ffs ufs/ffs/ffs_snapshot.c optional ffs ufs/ffs/ffs_softdep.c optional ffs ufs/ffs/ffs_subr.c optional ffs ufs/ffs/ffs_tables.c optional ffs ufs/ffs/ffs_vfsops.c optional ffs ufs/ffs/ffs_vnops.c optional ffs ufs/ffs/ffs_rawread.c optional directio ufs/ufs/ufs_acl.c optional ffs ufs/ufs/ufs_bmap.c optional ffs ufs/ufs/ufs_dirhash.c optional ffs ufs/ufs/ufs_extattr.c optional ffs ufs/ufs/ufs_gjournal.c optional ffs ufs/ufs/ufs_inode.c optional ffs ufs/ufs/ufs_lookup.c optional ffs ufs/ufs/ufs_quota.c optional ffs ufs/ufs/ufs_vfsops.c optional 
ffs ufs/ufs/ufs_vnops.c optional ffs vm/default_pager.c standard vm/device_pager.c standard vm/phys_pager.c standard vm/redzone.c optional DEBUG_REDZONE vm/swap_pager.c standard vm/uma_core.c standard vm/uma_dbg.c standard vm/vm_contig.c standard vm/memguard.c optional DEBUG_MEMGUARD vm/vm_fault.c standard vm/vm_glue.c standard vm/vm_init.c standard vm/vm_kern.c standard vm/vm_map.c standard vm/vm_meter.c standard vm/vm_mmap.c standard vm/vm_object.c standard vm/vm_page.c standard vm/vm_pageout.c standard vm/vm_pageq.c standard vm/vm_pager.c standard vm/vm_phys.c standard +vm/vm_reserv.c standard vm/vm_unix.c standard vm/vm_zeroidle.c standard vm/vnode_pager.c standard # gnu/fs/xfs/xfs_alloc.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" \ warning "kernel contains GPL contaminated xfs filesystem" gnu/fs/xfs/xfs_alloc_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_bit.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_bmap.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_bmap_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_buf_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_da_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_block.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_data.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_leaf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_node.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_sf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir2_trace.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dir_leaf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_error.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_extfree_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_fsops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_ialloc.c 
optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_ialloc_btree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_inode.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_inode_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_iocore.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_itable.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dfrag.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_log.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_log_recover.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_mount.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_rename.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_ail.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_buf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_extfree.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_inode.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_trans_item.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_utils.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_vfsops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_vnodeops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_rw.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_attr_leaf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_attr.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_dmops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_qmops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_iget.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD 
-I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_freebsd_iget.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_mountops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_vnops.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_frw.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_buf.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_globals.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_dmistubs.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_super.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_stats.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_vfs.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_vnode.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_sysctl.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_fs_subr.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/xfs_ioctl.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/debug.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/ktrace.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/mrlock.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/uuid.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/FreeBSD/support/kmem.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_iomap.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" gnu/fs/xfs/xfs_behavior.c optional xfs \ compile-with "${NORMAL_C} -I$S/gnu/fs/xfs/FreeBSD -I$S/gnu/fs/xfs/FreeBSD/support -I$S/gnu/fs/xfs" Index: head/sys/kern/kern_exec.c =================================================================== --- head/sys/kern/kern_exec.c (revision 174981) +++ head/sys/kern/kern_exec.c (revision 174982) @@ -1,1303 +1,1310 @@ /*- * Copyright (c) 1993, David Greenman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_hwpmc_hooks.h" #include "opt_ktrace.h" #include "opt_mac.h" +#include "opt_vm.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef KTRACE #include #endif #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #include #include #include MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments"); static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS); static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS); static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS); static int do_execve(struct thread *td, struct image_args *args, struct mac *mac_p); static void exec_free_args(struct image_args *); /* XXX This should be vm_size_t. */ SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD, NULL, 0, sysctl_kern_ps_strings, "LU", ""); /* XXX This should be vm_size_t. 
*/ SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD, NULL, 0, sysctl_kern_usrstack, "LU", ""); SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD, NULL, 0, sysctl_kern_stackprot, "I", ""); u_long ps_arg_cache_limit = PAGE_SIZE / 16; SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW, &ps_arg_cache_limit, 0, ""); static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS) { struct proc *p; int error; p = curproc; #ifdef SCTL_MASK32 if (req->flags & SCTL_MASK32) { unsigned int val; val = (unsigned int)p->p_sysent->sv_psstrings; error = SYSCTL_OUT(req, &val, sizeof(val)); } else #endif error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings, sizeof(p->p_sysent->sv_psstrings)); return error; } static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS) { struct proc *p; int error; p = curproc; #ifdef SCTL_MASK32 if (req->flags & SCTL_MASK32) { unsigned int val; val = (unsigned int)p->p_sysent->sv_usrstack; error = SYSCTL_OUT(req, &val, sizeof(val)); } else #endif error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack, sizeof(p->p_sysent->sv_usrstack)); return error; } static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS) { struct proc *p; p = curproc; return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot, sizeof(p->p_sysent->sv_stackprot))); } /* * Each of the items is a pointer to a `const struct execsw', hence the * double pointer here. */ static const struct execsw **execsw; #ifndef _SYS_SYSPROTO_H_ struct execve_args { char *fname; char **argv; char **envv; }; #endif int execve(td, uap) struct thread *td; struct execve_args /* { char *fname; char **argv; char **envv; } */ *uap; { int error; struct image_args args; error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE, uap->argv, uap->envv); if (error == 0) error = kern_execve(td, &args, NULL); return (error); } #ifndef _SYS_SYSPROTO_H_ struct __mac_execve_args { char *fname; char **argv; char **envv; struct mac *mac_p; }; #endif int __mac_execve(td, uap) struct thread *td; struct __mac_execve_args /* { char *fname; char **argv; char **envv; struct mac *mac_p; } */ *uap; { #ifdef MAC int error; struct image_args args; error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE, uap->argv, uap->envv); if (error == 0) error = kern_execve(td, &args, uap->mac_p); return (error); #else return (ENOSYS); #endif } /* * XXX: kern_execve has the astonishing property of not always returning to * the caller. If sufficiently bad things happen during the call to * do_execve(), it can end up calling exit1(); as a result, callers must * avoid doing anything which they might need to undo (e.g., allocating * memory). */ int kern_execve(td, args, mac_p) struct thread *td; struct image_args *args; struct mac *mac_p; { struct proc *p = td->td_proc; int error; AUDIT_ARG(argv, args->begin_argv, args->argc, args->begin_envv - args->begin_argv); AUDIT_ARG(envv, args->begin_envv, args->envc, args->endp - args->begin_envv); if (p->p_flag & P_HADTHREADS) { PROC_LOCK(p); if (thread_single(SINGLE_BOUNDARY)) { PROC_UNLOCK(p); exec_free_args(args); return (ERESTART); /* Try again later. */ } PROC_UNLOCK(p); } error = do_execve(td, args, mac_p); if (p->p_flag & P_HADTHREADS) { PROC_LOCK(p); /* * If success, we upgrade to SINGLE_EXIT state to * force other threads to suicide. */ if (error == 0) thread_single(SINGLE_EXIT); else thread_single_end(); PROC_UNLOCK(p); } return (error); } /* * In-kernel implementation of execve(). All arguments are assumed to be * userspace pointers from the passed thread. 
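/*
 * A minimal sketch, not part of this commit, showing the calling
 * discipline that the comment above imposes: because do_execve() may
 * end in exit1() and never return, a caller must not hold resources
 * that would need undoing across kern_execve().  The helper name
 * run_image() and its use are hypothetical; compare the real execve()
 * and __mac_execve() wrappers above.
 */
static int
run_image(struct thread *td, char *fname, char **argv, char **envv)
{
        struct image_args args;
        int error;

        /* exec_copyin_args() frees args itself on failure. */
        error = exec_copyin_args(&args, fname, UIO_USERSPACE, argv, envv);
        if (error == 0)
                error = kern_execve(td, &args, NULL);   /* may not return */
        return (error);
}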
*/ static int do_execve(td, args, mac_p) struct thread *td; struct image_args *args; struct mac *mac_p; { struct proc *p = td->td_proc; struct nameidata nd, *ndp; struct ucred *newcred = NULL, *oldcred; struct uidinfo *euip; register_t *stack_base; int error, len, i; struct image_params image_params, *imgp; struct vattr attr; int (*img_first)(struct image_params *); struct pargs *oldargs = NULL, *newargs = NULL; struct sigacts *oldsigacts, *newsigacts; #ifdef KTRACE struct vnode *tracevp = NULL; struct ucred *tracecred = NULL; #endif struct vnode *textvp = NULL; int credential_changing; int vfslocked; int textset; #ifdef MAC struct label *interplabel = NULL; int will_transition; #endif #ifdef HWPMC_HOOKS struct pmckern_procexec pe; #endif vfslocked = 0; imgp = &image_params; /* * Lock the process and set the P_INEXEC flag to indicate that * it should be left alone until we're done here. This is * necessary to avoid race conditions - e.g. in ptrace() - * that might allow a local user to illicitly obtain elevated * privileges. */ PROC_LOCK(p); KASSERT((p->p_flag & P_INEXEC) == 0, ("%s(): process already has P_INEXEC flag", __func__)); p->p_flag |= P_INEXEC; PROC_UNLOCK(p); /* * Initialize part of the common data */ imgp->proc = p; imgp->execlabel = NULL; imgp->attr = &attr; imgp->entry_addr = 0; imgp->vmspace_destroyed = 0; imgp->interpreted = 0; imgp->interpreter_name = args->buf + PATH_MAX + ARG_MAX; imgp->auxargs = NULL; imgp->vp = NULL; imgp->object = NULL; imgp->firstpage = NULL; imgp->ps_strings = 0; imgp->auxarg_size = 0; imgp->args = args; #ifdef MAC error = mac_execve_enter(imgp, mac_p); if (error) goto exec_fail; #endif imgp->image_header = NULL; /* * Translate the file name. namei() returns a vnode pointer * in ni_vp among other things. * * XXXAUDIT: It would be desirable to also audit the name of the * interpreter if this is an interpreted binary. */ ndp = &nd; NDINIT(ndp, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME | MPSAFE | AUDITVNODE1, UIO_SYSSPACE, args->fname, td); interpret: error = namei(ndp); if (error) goto exec_fail; vfslocked = NDHASGIANT(ndp); imgp->vp = ndp->ni_vp; /* * Check file permissions (also 'opens' file) */ error = exec_check_permissions(imgp); if (error) goto exec_fail_dealloc; imgp->object = imgp->vp->v_object; if (imgp->object != NULL) vm_object_reference(imgp->object); /* * Set VV_TEXT now so no one can write to the executable while we're * activating it. * * Remember if this was set before and unset it in case this is not * actually an executable image. */ textset = imgp->vp->v_vflag & VV_TEXT; imgp->vp->v_vflag |= VV_TEXT; error = exec_map_first_page(imgp); if (error) goto exec_fail_dealloc; imgp->proc->p_osrel = 0; /* * If the current process has a special image activator it * wants to try first, call it. For example, emulating shell * scripts differently. */ error = -1; if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL) error = img_first(imgp); /* * Loop through the list of image activators, calling each one. * An activator returns -1 if there is no match, 0 on success, * and an error otherwise. */ for (i = 0; error == -1 && execsw[i]; ++i) { if (execsw[i]->ex_imgact == NULL || execsw[i]->ex_imgact == img_first) { continue; } error = (*execsw[i]->ex_imgact)(imgp); } if (error) { if (error == -1) { if (textset == 0) imgp->vp->v_vflag &= ~VV_TEXT; error = ENOEXEC; } goto exec_fail_dealloc; } /* * Special interpreter operation, cleanup and loop up to try to * activate the interpreter.
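/*
 * A hypothetical image activator illustrating the convention described
 * above: return -1 when the header is not recognized so the execsw[]
 * scan continues, 0 once the image is activated, or an errno value on a
 * genuine failure.  MYFMT_MAGIC, exec_myfmt_imgact(), and myfmt_execsw
 * are invented for illustration; the real activators (ELF, a.out,
 * shell scripts) live in kern/imgact_*.c and are registered through
 * exec_register(), typically via the EXEC_SET() macro.
 */
#define MYFMT_MAGIC     0x594d          /* hypothetical magic number */

static int
exec_myfmt_imgact(struct image_params *imgp)
{
        const uint16_t *magic = (const uint16_t *)imgp->image_header;

        if (*magic != MYFMT_MAGIC)
                return (-1);            /* not ours; try the next activator */
        /* ... validate header, exec_new_vmspace(), map segments ... */
        return (0);
}

static struct execsw myfmt_execsw = { exec_myfmt_imgact, "myfmt" };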
*/ if (imgp->interpreted) { exec_unmap_first_page(imgp); /* * VV_TEXT needs to be unset for scripts. There is a short * period before we determine that something is a script where * VV_TEXT will be set. The vnode lock is held over this * entire period so nothing should illegitimately be blocked. */ imgp->vp->v_vflag &= ~VV_TEXT; /* free name buffer and old vnode */ NDFREE(ndp, NDF_ONLY_PNBUF); #ifdef MAC interplabel = mac_vnode_label_alloc(); mac_vnode_copy_label(ndp->ni_vp->v_label, interplabel); #endif vput(ndp->ni_vp); vm_object_deallocate(imgp->object); imgp->object = NULL; VFS_UNLOCK_GIANT(vfslocked); vfslocked = 0; /* set new name to that of the interpreter */ NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME | MPSAFE, UIO_SYSSPACE, imgp->interpreter_name, td); goto interpret; } /* * Copy out strings (args and env) and initialize stack base */ if (p->p_sysent->sv_copyout_strings) stack_base = (*p->p_sysent->sv_copyout_strings)(imgp); else stack_base = exec_copyout_strings(imgp); /* * If custom stack fixup routine present for this process * let it do the stack setup. * Else stuff argument count as first item on stack */ if (p->p_sysent->sv_fixup != NULL) (*p->p_sysent->sv_fixup)(&stack_base, imgp); else suword(--stack_base, imgp->args->argc); /* * For security and other reasons, the file descriptor table cannot * be shared after an exec. */ fdunshare(p, td); /* * Malloc things before we need locks. */ newcred = crget(); euip = uifind(attr.va_uid); i = imgp->args->begin_envv - imgp->args->begin_argv; /* Cache arguments if they fit inside our allowance */ if (ps_arg_cache_limit >= i + sizeof(struct pargs)) { newargs = pargs_alloc(i); bcopy(imgp->args->begin_argv, newargs->ar_args, i); } /* close files on exec */ VOP_UNLOCK(imgp->vp, 0, td); fdcloseexec(td); vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td); /* Get a reference to the vnode prior to locking the proc */ VREF(ndp->ni_vp); /* * For security and other reasons, signal handlers cannot * be shared after an exec. The new process gets a copy of the old * handlers. In execsigs(), the new process will have its signals * reset. */ PROC_LOCK(p); if (sigacts_shared(p->p_sigacts)) { oldsigacts = p->p_sigacts; PROC_UNLOCK(p); newsigacts = sigacts_alloc(); sigacts_copy(newsigacts, oldsigacts); PROC_LOCK(p); p->p_sigacts = newsigacts; } else oldsigacts = NULL; /* Stop profiling */ stopprofclock(p); /* reset caught signals */ execsigs(p); /* name this process - nameiexec(p, ndp) */ len = min(ndp->ni_cnd.cn_namelen,MAXCOMLEN); bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len); p->p_comm[len] = 0; bcopy(p->p_comm, td->td_name, sizeof(td->td_name)); /* * mark as execed, wakeup the process that vforked (if any) and tell * it that it now has its own resources back */ p->p_flag |= P_EXEC; if (p->p_pptr && (p->p_flag & P_PPWAIT)) { p->p_flag &= ~P_PPWAIT; wakeup(p->p_pptr); } /* * Implement image setuid/setgid. * * Don't honor setuid/setgid if the filesystem prohibits it or if * the process is being traced. * * XXXMAC: For the time being, use NOSUID to also prohibit * transitions on the file system. 
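/*
 * Worked example for the credential_changing test computed just below
 * (all numbers illustrative): a binary with mode 04555 owned by uid 0,
 * executed by a process whose cr_uid is 1001, sets credential_changing
 * because VSUID is set in va_mode and cr_uid != va_uid.  Even then the
 * new credentials are withheld when the vnode's filesystem is mounted
 * MNT_NOSUID or the process is being traced (P_TRACED), exactly as the
 * comment above states.
 */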
*/ oldcred = p->p_ucred; credential_changing = 0; credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid != attr.va_uid; credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid != attr.va_gid; #ifdef MAC will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp, interplabel, imgp); credential_changing |= will_transition; #endif if (credential_changing && (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 && (p->p_flag & P_TRACED) == 0) { /* * Turn off syscall tracing for set-id programs, except for * root. Record any set-id flags first to make sure that * we do not regain any tracing during a possible block. */ setsugid(p); #ifdef KTRACE if (p->p_tracevp != NULL && priv_check_cred(oldcred, PRIV_DEBUG_DIFFCRED, 0)) { mtx_lock(&ktrace_mtx); p->p_traceflag = 0; tracevp = p->p_tracevp; p->p_tracevp = NULL; tracecred = p->p_tracecred; p->p_tracecred = NULL; mtx_unlock(&ktrace_mtx); } #endif /* * Close any file descriptors 0..2 that reference procfs, * then make sure file descriptors 0..2 are in use. * * setugidsafety() may call closef() and then pfind() * which may grab the process lock. * fdcheckstd() may call falloc() which may block to * allocate memory, so temporarily drop the process lock. */ PROC_UNLOCK(p); setugidsafety(td); VOP_UNLOCK(imgp->vp, 0, td); error = fdcheckstd(td); vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td); if (error != 0) goto done1; PROC_LOCK(p); /* * Set the new credentials. */ crcopy(newcred, oldcred); if (attr.va_mode & VSUID) change_euid(newcred, euip); if (attr.va_mode & VSGID) change_egid(newcred, attr.va_gid); #ifdef MAC if (will_transition) { mac_vnode_execve_transition(oldcred, newcred, imgp->vp, interplabel, imgp); } #endif /* * Implement correct POSIX saved-id behavior. * * XXXMAC: Note that the current logic will save the * uid and gid if a MAC domain transition occurs, even * though maybe it shouldn't. */ change_svuid(newcred, newcred->cr_uid); change_svgid(newcred, newcred->cr_gid); p->p_ucred = newcred; newcred = NULL; } else { if (oldcred->cr_uid == oldcred->cr_ruid && oldcred->cr_gid == oldcred->cr_rgid) p->p_flag &= ~P_SUGID; /* * Implement correct POSIX saved-id behavior. * * XXX: It's not clear that the existing behavior is * POSIX-compliant. A number of sources indicate that the * saved uid/gid should only be updated if the new ruid is * not equal to the old ruid, or the new euid is not equal * to the old euid and the new euid is not equal to the old * ruid. The FreeBSD code always updates the saved uid/gid. * Also, this code uses the new (replaced) euid and egid as * the source, which may or may not be the right ones to use. */ if (oldcred->cr_svuid != oldcred->cr_uid || oldcred->cr_svgid != oldcred->cr_gid) { crcopy(newcred, oldcred); change_svuid(newcred, newcred->cr_uid); change_svgid(newcred, newcred->cr_gid); p->p_ucred = newcred; newcred = NULL; } } /* * Store the vp for use in procfs. This vnode was referenced prior * to locking the proc lock. */ textvp = p->p_textvp; p->p_textvp = ndp->ni_vp; /* * Notify others that we exec'd, and clear the P_INEXEC flag * as we're now a bona fide freshly-execed process. */ KNOTE_LOCKED(&p->p_klist, NOTE_EXEC); p->p_flag &= ~P_INEXEC; /* * If tracing the process, trap to debugger so breakpoints * can be set before the program executes. * Use tdsignal to deliver signal to current thread, use * psignal may cause the signal to be delivered to wrong thread * because that thread will exit, remember we are going to enter * single thread mode. 
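/*
 * Illustrative uid transitions for the set-id path above, with
 * hypothetical numbers: executing a setuid-root binary from ruid 1001
 * gives
 *
 *      before exec:  ruid 1001  euid 1001  svuid 1001
 *      after exec:   ruid 1001  euid    0  svuid    0
 *
 * change_euid() takes the effective uid from the file owner, and
 * change_svuid() then copies that new euid into the saved uid; the
 * saved uid is what later allows the process to drop and regain
 * privilege with seteuid(2).
 */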
*/ if (p->p_flag & P_TRACED) tdsignal(p, td, SIGTRAP, NULL); /* clear "fork but no exec" flag, as we _are_ execing */ p->p_acflag &= ~AFORK; /* * Free any previous argument cache and replace it with * the new argument cache, if any. */ oldargs = p->p_args; p->p_args = newargs; newargs = NULL; #ifdef HWPMC_HOOKS /* * Check if system-wide sampling is in effect or if the * current process is using PMCs. If so, do exec() time * processing. This processing needs to happen AFTER the * P_INEXEC flag is cleared. * * The proc lock needs to be released before taking the PMC * SX. */ if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) { PROC_UNLOCK(p); pe.pm_credentialschanged = credential_changing; pe.pm_entryaddr = imgp->entry_addr; PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe); } else PROC_UNLOCK(p); #else /* !HWPMC_HOOKS */ PROC_UNLOCK(p); #endif /* Set values passed into the program in registers. */ if (p->p_sysent->sv_setregs) (*p->p_sysent->sv_setregs)(td, imgp->entry_addr, (u_long)(uintptr_t)stack_base, imgp->ps_strings); else exec_setregs(td, imgp->entry_addr, (u_long)(uintptr_t)stack_base, imgp->ps_strings); vfs_mark_atime(imgp->vp, td); done1: /* * Free any resources malloc'd earlier that we didn't use. */ uifree(euip); if (newcred == NULL) crfree(oldcred); else crfree(newcred); VOP_UNLOCK(imgp->vp, 0, td); /* * Handle deferred decrement of ref counts. */ if (textvp != NULL) { int tvfslocked; tvfslocked = VFS_LOCK_GIANT(textvp->v_mount); vrele(textvp); VFS_UNLOCK_GIANT(tvfslocked); } if (ndp->ni_vp && error != 0) vrele(ndp->ni_vp); #ifdef KTRACE if (tracevp != NULL) { int tvfslocked; tvfslocked = VFS_LOCK_GIANT(tracevp->v_mount); vrele(tracevp); VFS_UNLOCK_GIANT(tvfslocked); } if (tracecred != NULL) crfree(tracecred); #endif vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td); if (oldargs != NULL) pargs_drop(oldargs); if (newargs != NULL) pargs_drop(newargs); if (oldsigacts != NULL) sigacts_free(oldsigacts); exec_fail_dealloc: /* * free various allocated resources */ if (imgp->firstpage != NULL) exec_unmap_first_page(imgp); if (imgp->vp != NULL) { NDFREE(ndp, NDF_ONLY_PNBUF); vput(imgp->vp); } if (imgp->object != NULL) vm_object_deallocate(imgp->object); if (error == 0) { /* * Stop the process here if its stop event mask has * the S_EXEC bit set. */ STOPEVENT(p, S_EXEC, 0); goto done2; } exec_fail: /* we're done here, clear P_INEXEC */ PROC_LOCK(p); p->p_flag &= ~P_INEXEC; PROC_UNLOCK(p); done2: #ifdef MAC mac_execve_exit(imgp); if (interplabel != NULL) mac_vnode_label_free(interplabel); #endif VFS_UNLOCK_GIANT(vfslocked); exec_free_args(args); if (error && imgp->vmspace_destroyed) { /* sorry, no more process anymore. 
exit gracefully */ exit1(td, W_EXITCODE(0, SIGABRT)); /* NOT REACHED */ } return (error); } int exec_map_first_page(imgp) struct image_params *imgp; { int rv, i; int initial_pagein; vm_page_t ma[VM_INITIAL_PAGEIN]; vm_object_t object; if (imgp->firstpage != NULL) exec_unmap_first_page(imgp); object = imgp->vp->v_object; if (object == NULL) return (EACCES); VM_OBJECT_LOCK(object); +#if VM_NRESERVLEVEL > 0 + if ((object->flags & OBJ_COLORED) == 0) { + object->flags |= OBJ_COLORED; + object->pg_color = 0; + } +#endif ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) { initial_pagein = VM_INITIAL_PAGEIN; if (initial_pagein > object->size) initial_pagein = object->size; for (i = 1; i < initial_pagein; i++) { if ((ma[i] = vm_page_lookup(object, i)) != NULL) { if (ma[i]->valid) break; if ((ma[i]->oflags & VPO_BUSY) || ma[i]->busy) break; vm_page_busy(ma[i]); } else { ma[i] = vm_page_alloc(object, i, VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED); if (ma[i] == NULL) break; } } initial_pagein = i; rv = vm_pager_get_pages(object, ma, initial_pagein, 0); ma[0] = vm_page_lookup(object, 0); if ((rv != VM_PAGER_OK) || (ma[0] == NULL) || (ma[0]->valid == 0)) { if (ma[0]) { vm_page_lock_queues(); vm_page_free(ma[0]); vm_page_unlock_queues(); } VM_OBJECT_UNLOCK(object); return (EIO); } } vm_page_lock_queues(); vm_page_hold(ma[0]); vm_page_unlock_queues(); vm_page_wakeup(ma[0]); VM_OBJECT_UNLOCK(object); imgp->firstpage = sf_buf_alloc(ma[0], 0); imgp->image_header = (char *)sf_buf_kva(imgp->firstpage); return (0); } void exec_unmap_first_page(imgp) struct image_params *imgp; { vm_page_t m; if (imgp->firstpage != NULL) { m = sf_buf_page(imgp->firstpage); sf_buf_free(imgp->firstpage); imgp->firstpage = NULL; vm_page_lock_queues(); vm_page_unhold(m); vm_page_unlock_queues(); } } /* * Destroy old address space, and allocate a new stack * The new stack is only SGROWSIZ large because it is grown * automatically in trap.c. 
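/*
 * A sketch, not from this commit, of the lifecycle that
 * exec_map_first_page() and exec_unmap_first_page() provide to image
 * activators: map page 0 of the executable through an sf_buf, read the
 * header via imgp->image_header, then unmap.  Note the hunk added above:
 * when VM_NRESERVLEVEL > 0, the text object is colored before the first
 * allocation so its pages can be placed in a superpage reservation
 * (vm/vm_reserv.c, new in this revision).  peek_header() is a
 * hypothetical helper.
 */
static int
peek_header(struct image_params *imgp, char *buf, size_t len)
{
        int error;

        error = exec_map_first_page(imgp);      /* holds and maps page 0 */
        if (error != 0)
                return (error);
        bcopy(imgp->image_header, buf, len);    /* len must be <= PAGE_SIZE */
        exec_unmap_first_page(imgp);            /* drops sf_buf and hold */
        return (0);
}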
*/ int exec_new_vmspace(imgp, sv) struct image_params *imgp; struct sysentvec *sv; { int error; struct proc *p = imgp->proc; struct vmspace *vmspace = p->p_vmspace; vm_offset_t stack_addr; vm_map_t map; u_long ssiz; imgp->vmspace_destroyed = 1; imgp->sysent = sv; /* May be called with Giant held */ EVENTHANDLER_INVOKE(process_exec, p, imgp); /* * Blow away entire process VM, if address space not shared, * otherwise, create a new VM space so that other threads are * not disrupted */ map = &vmspace->vm_map; if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser && vm_map_max(map) == sv->sv_maxuser) { shmexit(vmspace); pmap_remove_pages(vmspace_pmap(vmspace)); vm_map_remove(map, vm_map_min(map), vm_map_max(map)); } else { error = vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser); if (error) return (error); vmspace = p->p_vmspace; map = &vmspace->vm_map; } /* Allocate a new stack */ if (sv->sv_maxssiz != NULL) ssiz = *sv->sv_maxssiz; else ssiz = maxssiz; stack_addr = sv->sv_usrstack - ssiz; error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz, sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN); if (error) return (error); #ifdef __ia64__ /* Allocate a new register stack */ stack_addr = IA64_BACKINGSTORE; error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz, sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_UP); if (error) return (error); #endif /* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the * VM_STACK case, but they are still used to monitor the size of the * process stack so we can check the stack rlimit. */ vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT; vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - ssiz; return (0); } /* * Copy out argument and environment strings from the old process address * space into the temporary string buffer. */ int exec_copyin_args(struct image_args *args, char *fname, enum uio_seg segflg, char **argv, char **envv) { char *argp, *envp; int error; size_t length; error = 0; bzero(args, sizeof(*args)); if (argv == NULL) return (EFAULT); /* * Allocate temporary demand zeroed space for argument and * environment strings: * * o ARG_MAX for argument and environment; * o MAXSHELLCMDLEN for the name of interpreters. */ args->buf = (char *) kmem_alloc_wait(exec_map, PATH_MAX + ARG_MAX + MAXSHELLCMDLEN); if (args->buf == NULL) return (ENOMEM); args->begin_argv = args->buf; args->endp = args->begin_argv; args->stringspace = ARG_MAX; args->fname = args->buf + ARG_MAX; /* * Copy the file name. */ error = (segflg == UIO_SYSSPACE) ? 
copystr(fname, args->fname, PATH_MAX, &length) : copyinstr(fname, args->fname, PATH_MAX, &length); if (error != 0) goto err_exit; /* * extract arguments first */ while ((argp = (caddr_t) (intptr_t) fuword(argv++))) { if (argp == (caddr_t) -1) { error = EFAULT; goto err_exit; } if ((error = copyinstr(argp, args->endp, args->stringspace, &length))) { if (error == ENAMETOOLONG) error = E2BIG; goto err_exit; } args->stringspace -= length; args->endp += length; args->argc++; } args->begin_envv = args->endp; /* * extract environment strings */ if (envv) { while ((envp = (caddr_t)(intptr_t)fuword(envv++))) { if (envp == (caddr_t)-1) { error = EFAULT; goto err_exit; } if ((error = copyinstr(envp, args->endp, args->stringspace, &length))) { if (error == ENAMETOOLONG) error = E2BIG; goto err_exit; } args->stringspace -= length; args->endp += length; args->envc++; } } return (0); err_exit: exec_free_args(args); return (error); } static void exec_free_args(struct image_args *args) { if (args->buf) { kmem_free_wakeup(exec_map, (vm_offset_t)args->buf, PATH_MAX + ARG_MAX + MAXSHELLCMDLEN); args->buf = NULL; } } /* * Copy strings out to the new process address space, constructing new arg * and env vector tables. Return a pointer to the base so that it can be used * as the initial stack pointer. */ register_t * exec_copyout_strings(imgp) struct image_params *imgp; { int argc, envc; char **vectp; char *stringp, *destp; register_t *stack_base; struct ps_strings *arginfo; struct proc *p; int szsigcode; /* * Calculate string base and vector table pointers. * Also deal with signal trampoline code for this exec type. */ p = imgp->proc; szsigcode = 0; arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings; if (p->p_sysent->sv_szsigcode != NULL) szsigcode = *(p->p_sysent->sv_szsigcode); destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE - roundup((ARG_MAX - imgp->args->stringspace), sizeof(char *)); /* * install sigcode */ if (szsigcode) copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo - szsigcode), szsigcode); /* * If we have a valid auxargs ptr, prepare some room * on the stack. */ if (imgp->auxargs) { /* * 'AT_COUNT*2' is the size of the ELF Auxargs data. This is for * backward compatibility. */ imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size : (AT_COUNT * 2); /* * The '+ 2' is for the null pointers at the end of each of * the arg and env vector sets, and imgp->auxarg_size is room * for the arguments of the runtime loader. */ vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2 + imgp->auxarg_size) * sizeof(char *)); } else { /* * The '+ 2' is for the null pointers at the end of each of * the arg and env vector sets. */ vectp = (char **)(destp - (imgp->args->argc + imgp->args->envc + 2) * sizeof(char *)); } /* * vectp also becomes our initial stack base */ stack_base = (register_t *)vectp; stringp = imgp->args->begin_argv; argc = imgp->args->argc; envc = imgp->args->envc; /* * Copy out strings - arguments and environment. */ copyout(stringp, destp, ARG_MAX - imgp->args->stringspace); /* * Fill in "ps_strings" struct for ps, w, etc. */ suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp); suword(&arginfo->ps_nargvstr, argc); /* * Fill in argument portion of vector table.
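 *
 * For reference, the user stack image being assembled here looks
 * like this (an editor's sketch; addresses decrease downward):
 *
 *	arginfo:	struct ps_strings (at sv_psstrings)
 *			signal trampoline (szsigcode bytes)
 *			SPARE_USRSPACE pad
 *	destp:		argv[] and envv[] string data
 *	vectp:		argv pointers, NULL, envv pointers, NULL
 *			(plus auxargs room when imgp->auxargs is set)
 *	stack_base:	== vectp, returned as the initial stack pointer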
*/ for (; argc > 0; --argc) { suword(vectp++, (long)(intptr_t)destp); while (*stringp++ != 0) destp++; destp++; } /* a null vector table pointer separates the argp's from the envp's */ suword(vectp++, 0); suword(&arginfo->ps_envstr, (long)(intptr_t)vectp); suword(&arginfo->ps_nenvstr, envc); /* * Fill in environment portion of vector table. */ for (; envc > 0; --envc) { suword(vectp++, (long)(intptr_t)destp); while (*stringp++ != 0) destp++; destp++; } /* end of vector table is a null pointer */ suword(vectp, 0); return (stack_base); } /* * Check permissions of file to execute. * Called with imgp->vp locked. * Return 0 for success or error code on failure. */ int exec_check_permissions(imgp) struct image_params *imgp; { struct vnode *vp = imgp->vp; struct vattr *attr = imgp->attr; struct thread *td; int error; td = curthread; /* XXXKSE */ /* Get file attributes */ error = VOP_GETATTR(vp, attr, td->td_ucred, td); if (error) return (error); #ifdef MAC error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp); if (error) return (error); #endif /* * 1) Check if file execution is disabled for the filesystem that this * file resides on. * 2) Ensure that at least one execute bit is on - otherwise root * will always succeed, and we don't want that to happen unless the * file really is executable. * 3) Ensure that the file is a regular file. */ if ((vp->v_mount->mnt_flag & MNT_NOEXEC) || ((attr->va_mode & 0111) == 0) || (attr->va_type != VREG)) return (EACCES); /* * Zero length files can't be exec'd */ if (attr->va_size == 0) return (ENOEXEC); /* * Check for execute permission to file based on current credentials. */ error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td); if (error) return (error); /* * Check number of open-for-writes on the file and deny execution * if there are any. */ if (vp->v_writecount) return (ETXTBSY); /* * Call filesystem specific open routine (which does nothing in the * general case). */ error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL); return (error); } /* * Exec handler registration */ int exec_register(execsw_arg) const struct execsw *execsw_arg; { const struct execsw **es, **xs, **newexecsw; int count = 2; /* New slot and trailing NULL */ if (execsw) for (es = execsw; *es; es++) count++; newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); if (newexecsw == NULL) return (ENOMEM); xs = newexecsw; if (execsw) for (es = execsw; *es; es++) *xs++ = *es; *xs++ = execsw_arg; *xs = NULL; if (execsw) free(execsw, M_TEMP); execsw = newexecsw; return (0); } int exec_unregister(execsw_arg) const struct execsw *execsw_arg; { const struct execsw **es, **xs, **newexecsw; int count = 1; if (execsw == NULL) panic("unregister with no handlers left?\n"); for (es = execsw; *es; es++) { if (*es == execsw_arg) break; } if (*es == NULL) return (ENOENT); for (es = execsw; *es; es++) if (*es != execsw_arg) count++; newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK); if (newexecsw == NULL) return (ENOMEM); xs = newexecsw; for (es = execsw; *es; es++) if (*es != execsw_arg) *xs++ = *es; *xs = NULL; if (execsw) free(execsw, M_TEMP); execsw = newexecsw; return (0); } Index: head/sys/vm/vm_fault.c =================================================================== --- head/sys/vm/vm_fault.c (revision 174981) +++ head/sys/vm/vm_fault.c (revision 174982) @@ -1,1310 +1,1323 @@ /*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved.
* * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_fault.c 8.4 (Berkeley) 1/12/94 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * Page fault handling module. 
*/ #include __FBSDID("$FreeBSD$"); +#include "opt_vm.h" + #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* XXX Temporary for VFS_LOCK_GIANT() */ #define PFBAK 4 #define PFFOR 4 #define PAGEORDER_SIZE (PFBAK+PFFOR) static int prefault_pageorder[] = { -1 * PAGE_SIZE, 1 * PAGE_SIZE, -2 * PAGE_SIZE, 2 * PAGE_SIZE, -3 * PAGE_SIZE, 3 * PAGE_SIZE, -4 * PAGE_SIZE, 4 * PAGE_SIZE }; static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *); static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t); #define VM_FAULT_READ_AHEAD 8 #define VM_FAULT_READ_BEHIND 7 #define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1) struct faultstate { vm_page_t m; vm_object_t object; vm_pindex_t pindex; vm_page_t first_m; vm_object_t first_object; vm_pindex_t first_pindex; vm_map_t map; vm_map_entry_t entry; int lookup_still_valid; struct vnode *vp; }; static inline void release_page(struct faultstate *fs) { vm_page_wakeup(fs->m); vm_page_lock_queues(); vm_page_deactivate(fs->m); vm_page_unlock_queues(); fs->m = NULL; } static inline void unlock_map(struct faultstate *fs) { if (fs->lookup_still_valid) { vm_map_lookup_done(fs->map, fs->entry); fs->lookup_still_valid = FALSE; } } static void unlock_and_deallocate(struct faultstate *fs) { vm_object_pip_wakeup(fs->object); VM_OBJECT_UNLOCK(fs->object); if (fs->object != fs->first_object) { VM_OBJECT_LOCK(fs->first_object); vm_page_lock_queues(); vm_page_free(fs->first_m); vm_page_unlock_queues(); vm_object_pip_wakeup(fs->first_object); VM_OBJECT_UNLOCK(fs->first_object); fs->first_m = NULL; } vm_object_deallocate(fs->first_object); unlock_map(fs); if (fs->vp != NULL) { int vfslocked; vfslocked = VFS_LOCK_GIANT(fs->vp->v_mount); vput(fs->vp); fs->vp = NULL; VFS_UNLOCK_GIANT(vfslocked); } } /* * TRYPAGER - used by vm_fault to calculate whether the pager for the * current object *might* contain the page. * * default objects are zero-fill, there is no real pager. */ #define TRYPAGER (fs.object->type != OBJT_DEFAULT && \ (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired)) /* * vm_fault: * * Handle a page fault occurring at the given address, * requiring the given permissions, in the map specified. * If successful, the page is inserted into the * associated physical map. * * NOTE: the given address should be truncated to the * proper page address. * * KERN_SUCCESS is returned if the page fault is handled; otherwise, * a standard error specifying why the fault is fatal is returned. * * * The map in question must be referenced, and remains so. * Caller may hold no locks. */ int vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags) { vm_prot_t prot; int is_first_object_locked, result; boolean_t growstack, wired; int map_generation; vm_object_t next_object; vm_page_t marray[VM_FAULT_READ]; int hardfault; int faultcount; struct faultstate fs; hardfault = 0; growstack = TRUE; PCPU_INC(cnt.v_vm_faults); RetryFault:; /* * Find the backing store object and offset into it to begin the * search. 
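 *
 * Schematically, the lookup below resolves vaddr to an (object,
 * pindex) pair (an editor's sketch of the relation, not new code
 * in this commit):
 */
#if 0
	fs.first_pindex = OFF_TO_IDX(fs.entry->offset +
	    (vaddr - fs.entry->start));
#endif
/*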
*/ fs.map = map; result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired); if (result != KERN_SUCCESS) { if (result != KERN_PROTECTION_FAILURE || (fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) { if (growstack && result == KERN_INVALID_ADDRESS && map != kernel_map && curproc != NULL) { result = vm_map_growstack(curproc, vaddr); if (result != KERN_SUCCESS) return (KERN_FAILURE); growstack = FALSE; goto RetryFault; } return (result); } /* * If we are user-wiring a r/w segment, and it is COW, then * we need to do the COW operation. Note that we don't * currently COW RO sections, because it is NOT desirable * to COW .text. We simply keep .text from ever being COW'ed * and take the heat that one cannot debug wired .text sections. */ result = vm_map_lookup(&fs.map, vaddr, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE, &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired); if (result != KERN_SUCCESS) return (result); /* * If we don't COW now, on a user wire, the user will never * be able to write to the mapping. If we don't make this * restriction, the bookkeeping would be nearly impossible. * * XXX The following assignment modifies the map without * holding a write lock on it. */ if ((fs.entry->protection & VM_PROT_WRITE) == 0) fs.entry->max_protection &= ~VM_PROT_WRITE; } map_generation = fs.map->timestamp; if (fs.entry->eflags & MAP_ENTRY_NOFAULT) { panic("vm_fault: fault on nofault entry, addr: %lx", (u_long)vaddr); } /* * Make a reference to this object to prevent its disposal while we * are messing with it. Once we have the reference, the map is free * to be diddled. Since objects reference their shadows (and copies), * they will stay around as well. * * Bump the paging-in-progress count to prevent size changes (e.g. * truncation operations) during I/O. This must be done after * obtaining the vnode lock in order to avoid possible deadlocks. * * XXX vnode_pager_lock() can block without releasing the map lock. */ if (fs.first_object->flags & OBJ_NEEDGIANT) mtx_lock(&Giant); VM_OBJECT_LOCK(fs.first_object); vm_object_reference_locked(fs.first_object); fs.vp = vnode_pager_lock(fs.first_object); KASSERT(fs.vp == NULL || !fs.map->system_map, ("vm_fault: vnode-backed object mapped by system map")); KASSERT((fs.first_object->flags & OBJ_NEEDGIANT) == 0 || !fs.map->system_map, ("vm_fault: Object requiring giant mapped by system map")); if (fs.first_object->flags & OBJ_NEEDGIANT) mtx_unlock(&Giant); vm_object_pip_add(fs.first_object, 1); fs.lookup_still_valid = TRUE; if (wired) fault_type = prot; fs.first_m = NULL; /* * Search for the page at object/offset. */ fs.object = fs.first_object; fs.pindex = fs.first_pindex; while (TRUE) { /* * If the object is dead, we stop here */ if (fs.object->flags & OBJ_DEAD) { unlock_and_deallocate(&fs); return (KERN_PROTECTION_FAILURE); } /* * See if page is resident */ fs.m = vm_page_lookup(fs.object, fs.pindex); if (fs.m != NULL) { /* * check for page-based copy on write. * We check fs.object == fs.first_object so * as to ensure the legacy COW mechanism is * used when the page in question is part of * a shadow object. Otherwise, vm_page_cowfault() * removes the page from the backing object, * which is not what we want. */ vm_page_lock_queues(); if ((fs.m->cow) && (fault_type & VM_PROT_WRITE) && (fs.object == fs.first_object)) { vm_page_cowfault(fs.m); vm_page_unlock_queues(); unlock_and_deallocate(&fs); goto RetryFault; } /* * Wait/Retry if the page is busy.
We have to do this * if the page is busy via either VPO_BUSY or * vm_page_t->busy because the vm_pager may be using * vm_page_t->busy for pageouts ( and even pageins if * it is the vnode pager ), and we could end up trying * to pagein and pageout the same page simultaneously. * * We can theoretically allow the busy case on a read * fault if the page is marked valid, but since such * pages are typically already pmap'd, putting that * special case in might be more effort than it is * worth. We cannot under any circumstances mess * around with a vm_page_t->busy page except, perhaps, * to pmap it. */ if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) { vm_page_unlock_queues(); VM_OBJECT_UNLOCK(fs.object); if (fs.object != fs.first_object) { VM_OBJECT_LOCK(fs.first_object); vm_page_lock_queues(); vm_page_free(fs.first_m); vm_page_unlock_queues(); vm_object_pip_wakeup(fs.first_object); VM_OBJECT_UNLOCK(fs.first_object); fs.first_m = NULL; } unlock_map(&fs); if (fs.vp != NULL) { int vfslck; vfslck = VFS_LOCK_GIANT(fs.vp->v_mount); vput(fs.vp); fs.vp = NULL; VFS_UNLOCK_GIANT(vfslck); } VM_OBJECT_LOCK(fs.object); if (fs.m == vm_page_lookup(fs.object, fs.pindex)) { vm_page_sleep_if_busy(fs.m, TRUE, "vmpfw"); } vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); PCPU_INC(cnt.v_intrans); vm_object_deallocate(fs.first_object); goto RetryFault; } vm_pageq_remove(fs.m); vm_page_unlock_queues(); /* * Mark page busy for other processes, and the * pagedaemon. If it still isn't completely valid * (readable), jump to readrest, else break-out ( we * found the page ). */ vm_page_busy(fs.m); if (((fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) && fs.m->object != kernel_object && fs.m->object != kmem_object) { goto readrest; } break; } /* * Page is not resident. If this is the search termination point * or the pager might contain the page, allocate a new page. */ if (TRYPAGER || fs.object == fs.first_object) { if (fs.pindex >= fs.object->size) { unlock_and_deallocate(&fs); return (KERN_PROTECTION_FAILURE); } /* * Allocate a new page for this object/offset pair. */ fs.m = NULL; if (!vm_page_count_severe()) { +#if VM_NRESERVLEVEL > 0 + if ((fs.object->flags & OBJ_COLORED) == 0) { + fs.object->flags |= OBJ_COLORED; + fs.object->pg_color = atop(vaddr) - + fs.pindex; + } +#endif fs.m = vm_page_alloc(fs.object, fs.pindex, (fs.vp || fs.object->backing_object)? VM_ALLOC_NORMAL: VM_ALLOC_ZERO); } if (fs.m == NULL) { unlock_and_deallocate(&fs); VM_WAITPFAULT; goto RetryFault; } else if ((fs.m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) break; } readrest: /* * We have either found a resident page or allocated a new one. * The page may not be valid, or may be only partially * valid. * * Attempt to fault-in the page if there is a chance that the * pager has it, and potentially fault in additional pages * at the same time.
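 *
 * The read-around window computed below is, schematically (an
 * editor's condensation; MAP_ENTRY_BEHAV_RANDOM forces both
 * values to 0):
 */
#if 0
	behind = MIN(atop(vaddr - fs.entry->start), VM_FAULT_READ_BEHIND);
	ahead = MIN(atop(fs.entry->end - vaddr) - 1, VM_FAULT_READ_AHEAD);
#endif
/*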
*/ if (TRYPAGER) { int rv; int reqpage = 0; int ahead, behind; u_char behavior = vm_map_entry_behavior(fs.entry); if (behavior == MAP_ENTRY_BEHAV_RANDOM) { ahead = 0; behind = 0; } else { behind = (vaddr - fs.entry->start) >> PAGE_SHIFT; if (behind > VM_FAULT_READ_BEHIND) behind = VM_FAULT_READ_BEHIND; ahead = ((fs.entry->end - vaddr) >> PAGE_SHIFT) - 1; if (ahead > VM_FAULT_READ_AHEAD) ahead = VM_FAULT_READ_AHEAD; } is_first_object_locked = FALSE; if ((behavior == MAP_ENTRY_BEHAV_SEQUENTIAL || (behavior != MAP_ENTRY_BEHAV_RANDOM && fs.pindex >= fs.entry->lastr && fs.pindex < fs.entry->lastr + VM_FAULT_READ)) && (fs.first_object == fs.object || (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object))) && fs.first_object->type != OBJT_DEVICE) { vm_pindex_t firstpindex, tmppindex; if (fs.first_pindex < 2 * VM_FAULT_READ) firstpindex = 0; else firstpindex = fs.first_pindex - 2 * VM_FAULT_READ; vm_page_lock_queues(); /* * note: partially valid pages cannot be * included in the lookahead - NFS piecemeal * writes will barf on it badly. */ for (tmppindex = fs.first_pindex - 1; tmppindex >= firstpindex; --tmppindex) { vm_page_t mt; mt = vm_page_lookup(fs.first_object, tmppindex); if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL)) break; if (mt->busy || (mt->oflags & VPO_BUSY) || (mt->flags & (PG_FICTITIOUS | PG_UNMANAGED)) || mt->hold_count || mt->wire_count) continue; pmap_remove_all(mt); if (mt->dirty) { vm_page_deactivate(mt); } else { vm_page_cache(mt); } } vm_page_unlock_queues(); ahead += behind; behind = 0; } if (is_first_object_locked) VM_OBJECT_UNLOCK(fs.first_object); /* * Now we find out if any other pages should be paged * in at this time. This routine checks to see if the * pages surrounding this fault reside in the same * object as the page for this fault. If they do, * they are also faulted into the object. The * array "marray" returned contains an array of * vm_page_t structs where one of them is the * vm_page_t passed to the routine. The reqpage * return value is the index into the marray for the * vm_page_t passed to the routine. * * fs.m plus the additional pages are VPO_BUSY'd. * * XXX vm_fault_additional_pages() can block * without releasing the map lock. */ faultcount = vm_fault_additional_pages( fs.m, behind, ahead, marray, &reqpage); /* * update lastr imperfectly (we do not know how much * getpages will actually read), but good enough. * * XXX The following assignment modifies the map * without holding a write lock on it. */ fs.entry->lastr = fs.pindex + faultcount - behind; /* * Call the pager to retrieve the data, if any, after * releasing the lock on the map. We hold a ref on * fs.object and the pages are VPO_BUSY'd. */ unlock_map(&fs); rv = faultcount ? vm_pager_get_pages(fs.object, marray, faultcount, reqpage) : VM_PAGER_FAIL; if (rv == VM_PAGER_OK) { /* * Found the page. Leave it busy while we play * with it. */ /* * Relookup in case pager changed page. Pager * is responsible for disposition of old page * if moved. */ fs.m = vm_page_lookup(fs.object, fs.pindex); if (!fs.m) { unlock_and_deallocate(&fs); goto RetryFault; } hardfault++; break; /* break to PAGE HAS BEEN FOUND */ } /* * Remove the bogus page (which does not exist at this * object/offset); before doing so, we must get back * our object lock to preserve our invariant. * * Also wake up any other process that may want to bring * in this page.
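 *
 * In brief, the dispositions for the vm_pager_get_pages() return
 * values handled here are (an editor's summary):
 *	VM_PAGER_OK	relookup the page and leave the loop
 *	VM_PAGER_BAD	offset beyond pager; free the page, fail
 *	VM_PAGER_ERROR	I/O error; log, free the page, fail
 *			(kernel_map faults fall through instead)
 *	VM_PAGER_FAIL	pager lacks the page; try the backing object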
* * If this is the top-level object, we must leave the * busy page to prevent another process from rushing * past us, and inserting the page in that object at * the same time that we are. */ if (rv == VM_PAGER_ERROR) printf("vm_fault: pager read error, pid %d (%s)\n", curproc->p_pid, curproc->p_comm); /* * Data outside the range of the pager or an I/O error */ /* * XXX - the check for kernel_map is a kludge to work * around having the machine panic on a kernel space * fault w/ I/O error. */ if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) { vm_page_lock_queues(); vm_page_free(fs.m); vm_page_unlock_queues(); fs.m = NULL; unlock_and_deallocate(&fs); return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE); } if (fs.object != fs.first_object) { vm_page_lock_queues(); vm_page_free(fs.m); vm_page_unlock_queues(); fs.m = NULL; /* * XXX - we cannot just fall out at this * point, m has been freed and is invalid! */ } } /* * We get here if the object has default pager (or unwiring) * or the pager doesn't have the page. */ if (fs.object == fs.first_object) fs.first_m = fs.m; /* * Move on to the next object. Lock the next object before * unlocking the current one. */ fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset); next_object = fs.object->backing_object; if (next_object == NULL) { /* * If there's no object left, fill the page in the top * object with zeros. */ if (fs.object != fs.first_object) { vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); fs.object = fs.first_object; fs.pindex = fs.first_pindex; fs.m = fs.first_m; VM_OBJECT_LOCK(fs.object); } fs.first_m = NULL; /* * Zero the page if necessary and mark it valid. */ if ((fs.m->flags & PG_ZERO) == 0) { pmap_zero_page(fs.m); } else { PCPU_INC(cnt.v_ozfod); } PCPU_INC(cnt.v_zfod); fs.m->valid = VM_PAGE_BITS_ALL; break; /* break to PAGE HAS BEEN FOUND */ } else { KASSERT(fs.object != next_object, ("object loop %p", next_object)); VM_OBJECT_LOCK(next_object); vm_object_pip_add(next_object, 1); if (fs.object != fs.first_object) vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); fs.object = next_object; } } KASSERT((fs.m->oflags & VPO_BUSY) != 0, ("vm_fault: not busy after main loop")); /* * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock * is held.] */ /* * If the page is being written, but isn't already owned by the * top-level object, we have to copy it into a new page owned by the * top-level object. */ if (fs.object != fs.first_object) { /* * We only really need to copy if we want to write it. */ if (fault_type & VM_PROT_WRITE) { /* * This allows pages to be virtually copied from a * backing_object into the first_object, where the * backing object has no other refs to it, and cannot * gain any more refs. Instead of a bcopy, we just * move the page from the backing object to the * first object. Note that we must mark the page * dirty in the first object so that it will go out * to swap when needed. 
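 *
 * Condensed, the fast path below moves the page up the chain
 * instead of copying it when all of the following hold (an
 * editor's summary of the tests spelled out in the code, plus a
 * successful VM_OBJECT_TRYLOCK of fs.first_object):
 */
#if 0
	if (fs.object->shadow_count == 1 && fs.object->ref_count == 1 &&
	    fs.object->handle == NULL &&
	    (fs.object->type == OBJT_DEFAULT ||
	    fs.object->type == OBJT_SWAP) &&
	    fs.object == fs.first_object->backing_object)
		;	/* vm_page_rename() suffices; no copy is made */
#endif
/*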
*/ is_first_object_locked = FALSE; if ( /* * Only one shadow object */ (fs.object->shadow_count == 1) && /* * No COW refs, except us */ (fs.object->ref_count == 1) && /* * No one else can look this object up */ (fs.object->handle == NULL) && /* * No other ways to look the object up */ ((fs.object->type == OBJT_DEFAULT) || (fs.object->type == OBJT_SWAP)) && (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) && /* * We don't chase down the shadow chain */ fs.object == fs.first_object->backing_object) { vm_page_lock_queues(); /* * get rid of the unnecessary page */ vm_page_free(fs.first_m); /* * grab the page and put it into the * process's object. The page is * automatically made dirty. */ vm_page_rename(fs.m, fs.first_object, fs.first_pindex); vm_page_unlock_queues(); vm_page_busy(fs.m); fs.first_m = fs.m; fs.m = NULL; PCPU_INC(cnt.v_cow_optim); } else { /* * Oh, well, let's copy it. */ pmap_copy_page(fs.m, fs.first_m); fs.first_m->valid = VM_PAGE_BITS_ALL; } if (fs.m) { /* * We no longer need the old page or object. */ release_page(&fs); } /* * fs.object != fs.first_object due to above * conditional */ vm_object_pip_wakeup(fs.object); VM_OBJECT_UNLOCK(fs.object); /* * Only use the new page below... */ fs.object = fs.first_object; fs.pindex = fs.first_pindex; fs.m = fs.first_m; if (!is_first_object_locked) VM_OBJECT_LOCK(fs.object); PCPU_INC(cnt.v_cow_faults); } else { prot &= ~VM_PROT_WRITE; } } /* * We must verify that the maps have not changed since our last * lookup. */ if (!fs.lookup_still_valid) { vm_object_t retry_object; vm_pindex_t retry_pindex; vm_prot_t retry_prot; if (!vm_map_trylock_read(fs.map)) { release_page(&fs); unlock_and_deallocate(&fs); goto RetryFault; } fs.lookup_still_valid = TRUE; if (fs.map->timestamp != map_generation) { result = vm_map_lookup_locked(&fs.map, vaddr, fault_type, &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired); /* * If we don't need the page any longer, put it on the inactive * list (the easiest thing to do here). If no one needs it, * pageout will grab it eventually. */ if (result != KERN_SUCCESS) { release_page(&fs); unlock_and_deallocate(&fs); /* * If retry of map lookup would have blocked then * retry fault from start. */ if (result == KERN_FAILURE) goto RetryFault; return (result); } if ((retry_object != fs.first_object) || (retry_pindex != fs.first_pindex)) { release_page(&fs); unlock_and_deallocate(&fs); goto RetryFault; } /* * Check whether the protection has changed or the object has * been copied while we left the map unlocked. Changing from * read to write permission is OK - we leave the page * write-protected, and catch the write fault. Changing from * write to read permission means that we can't mark the page * write-enabled after all. */ prot &= retry_prot; } } if (prot & VM_PROT_WRITE) { vm_object_set_writeable_dirty(fs.object); /* * If the fault is a write, we know that this page is being * written NOW so dirty it explicitly to save on * pmap_is_modified() calls later. * * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC * if the page is already dirty to prevent data written with * the expectation of being synced from not being synced. * Likewise if this entry does not request NOSYNC then make * sure the page isn't marked NOSYNC. Applications sharing * data should use the same flags to avoid ping ponging. * * Also tell the backing pager, if any, that it should remove * any swap backing since the page is now dirty.
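 *
 * The VPO_NOSYNC bookkeeping below reduces to (an editor's summary):
 *	entry NOSYNC, page clean	-> set VPO_NOSYNC on the page
 *	entry NOSYNC, page dirty	-> leave the page flags alone
 *	entry not NOSYNC		-> clear VPO_NOSYNC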
*/ if (fs.entry->eflags & MAP_ENTRY_NOSYNC) { if (fs.m->dirty == 0) fs.m->oflags |= VPO_NOSYNC; } else { fs.m->oflags &= ~VPO_NOSYNC; } if (fault_flags & VM_FAULT_DIRTY) { vm_page_dirty(fs.m); vm_pager_page_unswapped(fs.m); } } /* * Page had better still be busy */ KASSERT(fs.m->oflags & VPO_BUSY, ("vm_fault: page %p not busy!", fs.m)); /* * Sanity check: page must be completely valid or it is not fit to * map into user space. vm_pager_get_pages() ensures this. */ if (fs.m->valid != VM_PAGE_BITS_ALL) { vm_page_zero_invalid(fs.m, TRUE); printf("Warning: page %p partially invalid on fault\n", fs.m); } VM_OBJECT_UNLOCK(fs.object); /* * Put this page into the physical map. We had to do the unlock above * because pmap_enter() may sleep. We don't put the page * back on the active queue until later so that the pageout daemon * won't find it (yet). */ pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired); if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) { vm_fault_prefault(fs.map->pmap, vaddr, fs.entry); } VM_OBJECT_LOCK(fs.object); vm_page_lock_queues(); vm_page_flag_set(fs.m, PG_REFERENCED); /* * If the page is not wired down, then put it where the pageout daemon * can find it. */ if (fault_flags & VM_FAULT_WIRE_MASK) { if (wired) vm_page_wire(fs.m); else vm_page_unwire(fs.m, 1); } else { vm_page_activate(fs.m); } vm_page_unlock_queues(); vm_page_wakeup(fs.m); /* * Unlock everything, and return */ unlock_and_deallocate(&fs); if (hardfault) curthread->td_ru.ru_majflt++; else curthread->td_ru.ru_minflt++; return (KERN_SUCCESS); } /* * vm_fault_prefault provides a quick way of clustering * page faults into a process's address space. It is a "cousin" * of vm_map_pmap_enter, except it runs at page fault time instead * of mmap time. */ static void vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry) { int i; vm_offset_t addr, starta; vm_pindex_t pindex; vm_page_t m; vm_object_t object; if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) return; object = entry->object.vm_object; starta = addra - PFBAK * PAGE_SIZE; if (starta < entry->start) { starta = entry->start; } else if (starta > addra) { starta = 0; } for (i = 0; i < PAGEORDER_SIZE; i++) { vm_object_t backing_object, lobject; addr = addra + prefault_pageorder[i]; if (addr > addra + (PFFOR * PAGE_SIZE)) addr = 0; if (addr < starta || addr >= entry->end) continue; if (!pmap_is_prefaultable(pmap, addr)) continue; pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; lobject = object; VM_OBJECT_LOCK(lobject); while ((m = vm_page_lookup(lobject, pindex)) == NULL && lobject->type == OBJT_DEFAULT && (backing_object = lobject->backing_object) != NULL) { if (lobject->backing_object_offset & PAGE_MASK) break; pindex += lobject->backing_object_offset >> PAGE_SHIFT; VM_OBJECT_LOCK(backing_object); VM_OBJECT_UNLOCK(lobject); lobject = backing_object; } /* * give up when a page is not in memory */ if (m == NULL) { VM_OBJECT_UNLOCK(lobject); break; } if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && (m->busy == 0) && (m->flags & PG_FICTITIOUS) == 0) { vm_page_lock_queues(); pmap_enter_quick(pmap, addr, m, entry->protection); vm_page_unlock_queues(); } VM_OBJECT_UNLOCK(lobject); } } /* * vm_fault_quick: * * Ensure that the requested virtual address, which may be in userland, * is valid. Fault-in the page if necessary. Return -1 on failure.
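 *
 * Typical use (an editor's illustration; "uaddr" is a hypothetical
 * user address, not a name from this file):
 */
#if 0
	if (vm_fault_quick((caddr_t)uaddr, VM_PROT_WRITE) < 0)
		return (EFAULT);
#endif
/*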
*/ int vm_fault_quick(caddr_t v, int prot) { int r; if (prot & VM_PROT_WRITE) r = subyte(v, fubyte(v)); else r = fubyte(v); return(r); } /* * vm_fault_wire: * * Wire down a range of virtual addresses in a map. */ int vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t user_wire, boolean_t fictitious) { vm_offset_t va; int rv; /* * We simulate a fault to get the page and enter it in the physical * map. For user wiring, we only ask for read access on currently * read-only sections. */ for (va = start; va < end; va += PAGE_SIZE) { rv = vm_fault(map, va, user_wire ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE, user_wire ? VM_FAULT_USER_WIRE : VM_FAULT_CHANGE_WIRING); if (rv) { if (va != start) vm_fault_unwire(map, start, va, fictitious); return (rv); } } return (KERN_SUCCESS); } /* * vm_fault_unwire: * * Unwire a range of virtual addresses in a map. */ void vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end, boolean_t fictitious) { vm_paddr_t pa; vm_offset_t va; pmap_t pmap; pmap = vm_map_pmap(map); /* * Since the pages are wired down, we must be able to get their * mappings from the physical map system. */ for (va = start; va < end; va += PAGE_SIZE) { pa = pmap_extract(pmap, va); if (pa != 0) { pmap_change_wiring(pmap, va, FALSE); if (!fictitious) { vm_page_lock_queues(); vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1); vm_page_unlock_queues(); } } } } /* * Routine: * vm_fault_copy_entry * Function: * Copy all of the pages from a wired-down map entry to another. * * In/out conditions: * The source and destination maps must be locked for write. * The source map entry must be wired down (or be a sharing map * entry corresponding to a main map entry that is wired down). */ void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry) vm_map_t dst_map; vm_map_t src_map; vm_map_entry_t dst_entry; vm_map_entry_t src_entry; { vm_object_t backing_object, dst_object, object; vm_object_t src_object; vm_ooffset_t dst_offset; vm_ooffset_t src_offset; vm_pindex_t pindex; vm_prot_t prot; vm_offset_t vaddr; vm_page_t dst_m; vm_page_t src_m; #ifdef lint src_map++; #endif /* lint */ src_object = src_entry->object.vm_object; src_offset = src_entry->offset; /* * Create the top-level object for the destination entry. (Doesn't * actually shadow anything - we copy the pages directly.) */ dst_object = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(dst_entry->end - dst_entry->start)); +#if VM_NRESERVLEVEL > 0 + dst_object->flags |= OBJ_COLORED; + dst_object->pg_color = atop(dst_entry->start); +#endif VM_OBJECT_LOCK(dst_object); dst_entry->object.vm_object = dst_object; dst_entry->offset = 0; prot = dst_entry->max_protection; /* * Loop through all of the pages in the entry's range, copying each * one from the source object (it should be there) to the destination * object. */ for (vaddr = dst_entry->start, dst_offset = 0; vaddr < dst_entry->end; vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) { /* * Allocate a page in the destination object */ do { dst_m = vm_page_alloc(dst_object, OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL); if (dst_m == NULL) { VM_OBJECT_UNLOCK(dst_object); VM_WAIT; VM_OBJECT_LOCK(dst_object); } } while (dst_m == NULL); /* * Find the page in the source object, and copy it in. * (Because the source is wired down, the page will be in * memory.) 
*/ VM_OBJECT_LOCK(src_object); object = src_object; pindex = 0; while ((src_m = vm_page_lookup(object, pindex + OFF_TO_IDX(dst_offset + src_offset))) == NULL && (src_entry->protection & VM_PROT_WRITE) == 0 && (backing_object = object->backing_object) != NULL) { /* * Allow fallback to backing objects if we are reading. */ VM_OBJECT_LOCK(backing_object); pindex += OFF_TO_IDX(object->backing_object_offset); VM_OBJECT_UNLOCK(object); object = backing_object; } if (src_m == NULL) panic("vm_fault_copy_wired: page missing"); pmap_copy_page(src_m, dst_m); VM_OBJECT_UNLOCK(object); dst_m->valid = VM_PAGE_BITS_ALL; VM_OBJECT_UNLOCK(dst_object); /* * Enter it in the pmap... */ pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE); /* * Mark it no longer busy, and put it on the active list. */ VM_OBJECT_LOCK(dst_object); vm_page_lock_queues(); vm_page_activate(dst_m); vm_page_unlock_queues(); vm_page_wakeup(dst_m); } VM_OBJECT_UNLOCK(dst_object); } /* * This routine checks around the requested page for other pages that * might be able to be faulted in. This routine brackets the viable * pages for the pages to be paged in. * * Inputs: * m, rbehind, rahead * * Outputs: * marray (array of vm_page_t), reqpage (index of requested page) * * Return value: * number of pages in marray * * This routine can't block. */ static int vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage) vm_page_t m; int rbehind; int rahead; vm_page_t *marray; int *reqpage; { int i,j; vm_object_t object; vm_pindex_t pindex, startpindex, endpindex, tpindex; vm_page_t rtm; int cbehind, cahead; VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); object = m->object; pindex = m->pindex; cbehind = cahead = 0; /* * if the requested page is not available, then give up now */ if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) { return 0; } if ((cbehind == 0) && (cahead == 0)) { *reqpage = 0; marray[0] = m; return 1; } if (rahead > cahead) { rahead = cahead; } if (rbehind > cbehind) { rbehind = cbehind; } /* * scan backward for the read behind pages -- in memory */ if (pindex > 0) { if (rbehind > pindex) { rbehind = pindex; startpindex = 0; } else { startpindex = pindex - rbehind; } if ((rtm = TAILQ_PREV(m, pglist, listq)) != NULL && rtm->pindex >= startpindex) startpindex = rtm->pindex + 1; /* tpindex is unsigned; beware of numeric underflow. */ for (i = 0, tpindex = pindex - 1; tpindex >= startpindex && tpindex < pindex; i++, tpindex--) { rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED); if (rtm == NULL) { /* * Shift the allocated pages to the * beginning of the array. 
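 *
 * Worked example (an editor's illustration): with rbehind == 3,
 * startpindex == pindex - 3, and an allocation failure at
 * tpindex == pindex - 2 (i == 1), the one page already placed at
 * marray[2] is copied down to marray[0], keeping the behind-run
 * contiguous from the start of the array.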
*/ for (j = 0; j < i; j++) { marray[j] = marray[j + tpindex + 1 - startpindex]; } break; } marray[tpindex - startpindex] = rtm; } } else { startpindex = 0; i = 0; } marray[i] = m; /* page offset of the required page */ *reqpage = i; tpindex = pindex + 1; i++; /* * scan forward for the read ahead pages */ endpindex = tpindex + rahead; if ((rtm = TAILQ_NEXT(m, listq)) != NULL && rtm->pindex < endpindex) endpindex = rtm->pindex; if (endpindex > object->size) endpindex = object->size; for (; tpindex < endpindex; i++, tpindex++) { rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED); if (rtm == NULL) { break; } marray[i] = rtm; } /* return number of pages */ return i; } Index: head/sys/vm/vm_object.c =================================================================== --- head/sys/vm/vm_object.c (revision 174981) +++ head/sys/vm/vm_object.c (revision 174982) @@ -1,2212 +1,2257 @@ /*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94 * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
* * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * Virtual memory object module. */ #include __FBSDID("$FreeBSD$"); +#include "opt_vm.h" + #include #include #include #include #include #include #include #include #include /* for curproc, pageproc */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #define EASY_SCAN_FACTOR 8 #define MSYNC_FLUSH_HARDSEQ 0x01 #define MSYNC_FLUSH_SOFTSEQ 0x02 /* * msync / VM object flushing optimizations */ static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ; SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags, CTLFLAG_RW, &msync_flush_flags, 0, ""); static int old_msync; SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0, "Use old (insecure) msync behavior"); static void vm_object_qcollapse(vm_object_t object); static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags); static void vm_object_vndeallocate(vm_object_t object); /* * Virtual memory objects maintain the actual data * associated with allocated virtual memory. A given * page of memory exists within exactly one object. * * An object is only deallocated when all "references" * are given up. Only one "reference" to a given * region of an object should be writeable. * * Associated with each object is a list of all resident * memory pages belonging to that object; this list is * maintained by the "vm_page" module, and locked by the object's * lock. * * Each object also records a "pager" routine which is * used to retrieve (and store) pages to the proper backing * storage. In addition, objects may be backed by other * objects from which they were virtual-copied. 
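 *
 * For example (an editor's sketch), after a copy-on-write fork a
 * mapping may be backed by a chain such as:
 *
 *	anonymous shadow (OBJT_DEFAULT) -> file object (OBJT_VNODE)
 *
 * Modified pages live in the shadow; unmodified pages are found by
 * following the backing_object links.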
* * The only items within the object structure which are * modified after time of creation are: * reference count locked by object's lock * pager routine locked by object's lock * */ struct object_q vm_object_list; struct mtx vm_object_list_mtx; /* lock for object list and count */ struct vm_object kernel_object_store; struct vm_object kmem_object_store; SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, "VM object stats"); static long object_collapses; SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD, &object_collapses, 0, "VM object collapses"); static long object_bypasses; SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD, &object_bypasses, 0, "VM object bypasses"); static uma_zone_t obj_zone; static int vm_object_zinit(void *mem, int size, int flags); #ifdef INVARIANTS static void vm_object_zdtor(void *mem, int size, void *arg); static void vm_object_zdtor(void *mem, int size, void *arg) { vm_object_t object; object = (vm_object_t)mem; KASSERT(TAILQ_EMPTY(&object->memq), ("object %p has resident pages", object)); +#if VM_NRESERVLEVEL > 0 + KASSERT(LIST_EMPTY(&object->rvq), + ("object %p has reservations", + object)); +#endif KASSERT(object->cache == NULL, ("object %p has cached pages", object)); KASSERT(object->paging_in_progress == 0, ("object %p paging_in_progress = %d", object, object->paging_in_progress)); KASSERT(object->resident_page_count == 0, ("object %p resident_page_count = %d", object, object->resident_page_count)); KASSERT(object->shadow_count == 0, ("object %p shadow_count = %d", object, object->shadow_count)); } #endif static int vm_object_zinit(void *mem, int size, int flags) { vm_object_t object; object = (vm_object_t)mem; bzero(&object->mtx, sizeof(object->mtx)); VM_OBJECT_LOCK_INIT(object, "standard object"); /* These are true for any object that has been freed */ object->paging_in_progress = 0; object->resident_page_count = 0; object->shadow_count = 0; return (0); } void _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object) { TAILQ_INIT(&object->memq); LIST_INIT(&object->shadow_head); object->root = NULL; object->type = type; object->size = size; object->generation = 1; object->ref_count = 1; object->flags = 0; if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP)) object->flags = OBJ_ONEMAPPING; object->pg_color = 0; object->handle = NULL; object->backing_object = NULL; object->backing_object_offset = (vm_ooffset_t) 0; +#if VM_NRESERVLEVEL > 0 + LIST_INIT(&object->rvq); +#endif object->cache = NULL; mtx_lock(&vm_object_list_mtx); TAILQ_INSERT_TAIL(&vm_object_list, object, object_list); mtx_unlock(&vm_object_list_mtx); } /* * vm_object_init: * * Initialize the VM objects module. 
*/ void vm_object_init(void) { TAILQ_INIT(&vm_object_list); mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF); VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object"); _vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS), kernel_object); +#if VM_NRESERVLEVEL > 0 + kernel_object->flags |= OBJ_COLORED; + kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS); +#endif VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object"); _vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS), kmem_object); +#if VM_NRESERVLEVEL > 0 + kmem_object->flags |= OBJ_COLORED; + kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS); +#endif /* * The lock portion of struct vm_object must be type stable due * to vm_pageout_fallback_object_lock locking a vm object * without holding any references to it. */ obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL, #ifdef INVARIANTS vm_object_zdtor, #else NULL, #endif vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE); } void vm_object_clear_flag(vm_object_t object, u_short bits) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); object->flags &= ~bits; } void vm_object_pip_add(vm_object_t object, short i) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); object->paging_in_progress += i; } void vm_object_pip_subtract(vm_object_t object, short i) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); object->paging_in_progress -= i; } void vm_object_pip_wakeup(vm_object_t object) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); object->paging_in_progress--; if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) { vm_object_clear_flag(object, OBJ_PIPWNT); wakeup(object); } } void vm_object_pip_wakeupn(vm_object_t object, short i) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); if (i) object->paging_in_progress -= i; if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) { vm_object_clear_flag(object, OBJ_PIPWNT); wakeup(object); } } void vm_object_pip_wait(vm_object_t object, char *waitid) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); while (object->paging_in_progress) { object->flags |= OBJ_PIPWNT; msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0); } } /* * vm_object_allocate: * * Returns a new object with the given size. */ vm_object_t vm_object_allocate(objtype_t type, vm_pindex_t size) { vm_object_t object; object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK); _vm_object_allocate(type, size, object); return (object); } /* * vm_object_reference: * * Gets another reference to the given object. Note: OBJ_DEAD * objects can be referenced during final cleaning. */ void vm_object_reference(vm_object_t object) { struct vnode *vp; if (object == NULL) return; VM_OBJECT_LOCK(object); object->ref_count++; if (object->type == OBJT_VNODE) { int vfslocked; vp = object->handle; VM_OBJECT_UNLOCK(object); vfslocked = VFS_LOCK_GIANT(vp->v_mount); vget(vp, LK_RETRY, curthread); VFS_UNLOCK_GIANT(vfslocked); } else VM_OBJECT_UNLOCK(object); } /* * vm_object_reference_locked: * * Gets another reference to the given object. * * The object must be locked. */ void vm_object_reference_locked(vm_object_t object) { struct vnode *vp; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT((object->flags & OBJ_DEAD) == 0, ("vm_object_reference_locked: dead object referenced")); object->ref_count++; if (object->type == OBJT_VNODE) { vp = object->handle; vref(vp); } } /* * Handle deallocating an object of type OBJT_VNODE. 
*/ static void vm_object_vndeallocate(vm_object_t object) { struct vnode *vp = (struct vnode *) object->handle; VFS_ASSERT_GIANT(vp->v_mount); VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->type == OBJT_VNODE, ("vm_object_vndeallocate: not a vnode object")); KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp")); #ifdef INVARIANTS if (object->ref_count == 0) { vprint("vm_object_vndeallocate", vp); panic("vm_object_vndeallocate: bad object reference count"); } #endif object->ref_count--; if (object->ref_count == 0) { mp_fixme("Unlocked vflag access."); vp->v_vflag &= ~VV_TEXT; } VM_OBJECT_UNLOCK(object); /* * vrele may need a vop lock */ vrele(vp); } /* * vm_object_deallocate: * * Release a reference to the specified object, * gained either through a vm_object_allocate * or a vm_object_reference call. When all references * are gone, storage associated with this object * may be relinquished. * * No object may be locked. */ void vm_object_deallocate(vm_object_t object) { vm_object_t temp; while (object != NULL) { int vfslocked; vfslocked = 0; restart: VM_OBJECT_LOCK(object); if (object->type == OBJT_VNODE) { struct vnode *vp = (struct vnode *) object->handle; /* * Conditionally acquire Giant for a vnode-backed * object. We have to be careful since the type of * a vnode object can change while the object is * unlocked. */ if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) { vfslocked = 1; if (!mtx_trylock(&Giant)) { VM_OBJECT_UNLOCK(object); mtx_lock(&Giant); goto restart; } } vm_object_vndeallocate(object); VFS_UNLOCK_GIANT(vfslocked); return; } else /* * This is to handle the case that the object * changed type while we dropped its lock to * obtain Giant. */ VFS_UNLOCK_GIANT(vfslocked); KASSERT(object->ref_count != 0, ("vm_object_deallocate: object deallocated too many times: %d", object->type)); /* * If the reference count goes to 0 we start calling * vm_object_terminate() on the object chain. * A ref count of 1 may be a special case depending on the * shadow count being 0 or 1. */ object->ref_count--; if (object->ref_count > 1) { VM_OBJECT_UNLOCK(object); return; } else if (object->ref_count == 1) { if (object->shadow_count == 0) { vm_object_set_flag(object, OBJ_ONEMAPPING); } else if ((object->shadow_count == 1) && (object->handle == NULL) && (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { vm_object_t robject; robject = LIST_FIRST(&object->shadow_head); KASSERT(robject != NULL, ("vm_object_deallocate: ref_count: %d, shadow_count: %d", object->ref_count, object->shadow_count)); if (!VM_OBJECT_TRYLOCK(robject)) { /* * Avoid a potential deadlock. */ object->ref_count++; VM_OBJECT_UNLOCK(object); /* * More likely than not the thread * holding robject's lock has lower * priority than the current thread. * Let the lower priority thread run. */ pause("vmo_de", 1); continue; } /* * Collapse object into its shadow unless its * shadow is dead. In that case, object will * be deallocated by the thread that is * deallocating its shadow. 
*/ if ((robject->flags & OBJ_DEAD) == 0 && (robject->handle == NULL) && (robject->type == OBJT_DEFAULT || robject->type == OBJT_SWAP)) { robject->ref_count++; retry: if (robject->paging_in_progress) { VM_OBJECT_UNLOCK(object); vm_object_pip_wait(robject, "objde1"); temp = robject->backing_object; if (object == temp) { VM_OBJECT_LOCK(object); goto retry; } } else if (object->paging_in_progress) { VM_OBJECT_UNLOCK(robject); object->flags |= OBJ_PIPWNT; msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "objde2", 0); VM_OBJECT_LOCK(robject); temp = robject->backing_object; if (object == temp) { VM_OBJECT_LOCK(object); goto retry; } } else VM_OBJECT_UNLOCK(object); if (robject->ref_count == 1) { robject->ref_count--; object = robject; goto doterm; } object = robject; vm_object_collapse(object); VM_OBJECT_UNLOCK(object); continue; } VM_OBJECT_UNLOCK(robject); } VM_OBJECT_UNLOCK(object); return; } doterm: temp = object->backing_object; if (temp != NULL) { VM_OBJECT_LOCK(temp); LIST_REMOVE(object, shadow_list); temp->shadow_count--; temp->generation++; VM_OBJECT_UNLOCK(temp); object->backing_object = NULL; } /* * Don't double-terminate, we could be in a termination * recursion due to the terminate having to sync data * to disk. */ if ((object->flags & OBJ_DEAD) == 0) vm_object_terminate(object); else VM_OBJECT_UNLOCK(object); object = temp; } } /* * vm_object_terminate actually destroys the specified object, freeing * up all previously used resources. * * The object must be locked. * This routine may block. */ void vm_object_terminate(vm_object_t object) { vm_page_t p; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); /* * Make sure no one uses us. */ vm_object_set_flag(object, OBJ_DEAD); /* * wait for the pageout daemon to be done with the object */ vm_object_pip_wait(object, "objtrm"); KASSERT(!object->paging_in_progress, ("vm_object_terminate: pageout in progress")); /* * Clean and free the pages, as appropriate. All references to the * object are gone, so we don't need to lock it. */ if (object->type == OBJT_VNODE) { struct vnode *vp = (struct vnode *)object->handle; /* * Clean pages and flush buffers. */ vm_object_page_clean(object, 0, 0, OBJPC_SYNC); VM_OBJECT_UNLOCK(object); vinvalbuf(vp, V_SAVE, NULL, 0, 0); VM_OBJECT_LOCK(object); } KASSERT(object->ref_count == 0, ("vm_object_terminate: object with references, ref_count=%d", object->ref_count)); /* * Now free any remaining pages. For internal objects, this also * removes them from paging queues. Don't free wired pages, just * remove them from the object. */ vm_page_lock_queues(); while ((p = TAILQ_FIRST(&object->memq)) != NULL) { KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0, ("vm_object_terminate: freeing busy page %p " "p->busy = %d, p->flags %x\n", p, p->busy, p->flags)); if (p->wire_count == 0) { vm_page_free(p); cnt.v_pfree++; } else { vm_page_remove(p); } } vm_page_unlock_queues(); +#if VM_NRESERVLEVEL > 0 + if (__predict_false(!LIST_EMPTY(&object->rvq))) + vm_reserv_break_all(object); +#endif if (__predict_false(object->cache != NULL)) vm_page_cache_free(object, 0, 0); /* * Let the pager know object is dead. */ vm_pager_deallocate(object); VM_OBJECT_UNLOCK(object); /* * Remove the object from the global object list. */ mtx_lock(&vm_object_list_mtx); TAILQ_REMOVE(&vm_object_list, object, object_list); mtx_unlock(&vm_object_list_mtx); /* * Free the space for the object. */ uma_zfree(obj_zone, object); } /* * vm_object_page_clean * * Clean all dirty pages in the specified range of object. Leaves page * on whatever queue it is currently on. 
If NOSYNC is set then do not * write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC), * leaving the object dirty. * * When stuffing pages asynchronously, allow clustering. XXX we need a * synchronous clustering mode implementation. * * Odd semantics: if start == end, we clean everything. * * The object must be locked. */ void vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags) { vm_page_t p, np; vm_pindex_t tstart, tend; vm_pindex_t pi; int clearobjflags; int pagerflags; int curgeneration; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); if (object->type != OBJT_VNODE || (object->flags & OBJ_MIGHTBEDIRTY) == 0) return; pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK; pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0; vm_object_set_flag(object, OBJ_CLEANING); tstart = start; if (end == 0) { tend = object->size; } else { tend = end; } vm_page_lock_queues(); /* * If the caller is smart and only msync()s a range he knows is * dirty, we may be able to avoid an object scan. This results in * a phenomenal improvement in performance. We cannot do this * as a matter of course because the object may be huge - e.g. * the size might be in the gigabytes or terabytes. */ if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) { vm_pindex_t tscan; int scanlimit; int scanreset; scanreset = object->resident_page_count / EASY_SCAN_FACTOR; if (scanreset < 16) scanreset = 16; pagerflags |= VM_PAGER_IGNORE_CLEANCHK; scanlimit = scanreset; tscan = tstart; while (tscan < tend) { curgeneration = object->generation; p = vm_page_lookup(object, tscan); if (p == NULL || p->valid == 0) { if (--scanlimit == 0) break; ++tscan; continue; } vm_page_test_dirty(p); if ((p->dirty & p->valid) == 0) { if (--scanlimit == 0) break; ++tscan; continue; } /* * If we have been asked to skip nosync pages and * this is a nosync page, we can't continue. */ if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) { if (--scanlimit == 0) break; ++tscan; continue; } scanlimit = scanreset; /* * This returns 0 if it was unable to busy the first * page (i.e. had to sleep). */ tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags); } /* * If everything was dirty and we flushed it successfully, * and the requested range is not the entire object, we * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can * return immediately. */ if (tscan >= tend && (tstart || tend < object->size)) { vm_page_unlock_queues(); vm_object_clear_flag(object, OBJ_CLEANING); return; } pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK; } /* * Generally set CLEANCHK interlock and make the page read-only so * we can then clear the object flags. * * However, if this is a nosync mmap then the object is likely to * stay dirty so do not mess with the page and do not clear the * object flags.
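 *
 * The per-page interlock pattern used below, in miniature (an
 * editor's sketch, not new code in this commit):
 */
#if 0
	p->oflags |= VPO_CLEANCHK;	/* page is queued for this pass */
	pmap_remove_write(p);		/* re-dirtying must refault */
	/* ... flush the page ... */
	p->oflags &= ~VPO_CLEANCHK;	/* examined; skip on rescan */
#endif
/*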
*/ clearobjflags = 1; TAILQ_FOREACH(p, &object->memq, listq) { p->oflags |= VPO_CLEANCHK; if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) clearobjflags = 0; else pmap_remove_write(p); } if (clearobjflags && (tstart == 0) && (tend == object->size)) { struct vnode *vp; vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY); if (object->type == OBJT_VNODE && (vp = (struct vnode *)object->handle) != NULL) { VI_LOCK(vp); if (vp->v_iflag & VI_OBJDIRTY) vp->v_iflag &= ~VI_OBJDIRTY; VI_UNLOCK(vp); } } rescan: curgeneration = object->generation; for (p = TAILQ_FIRST(&object->memq); p; p = np) { int n; np = TAILQ_NEXT(p, listq); again: pi = p->pindex; if ((p->oflags & VPO_CLEANCHK) == 0 || (pi < tstart) || (pi >= tend) || p->valid == 0) { p->oflags &= ~VPO_CLEANCHK; continue; } vm_page_test_dirty(p); if ((p->dirty & p->valid) == 0) { p->oflags &= ~VPO_CLEANCHK; continue; } /* * If we have been asked to skip nosync pages and this is a * nosync page, skip it. Note that the object flags were * not cleared in this case so we do not have to set them. */ if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) { p->oflags &= ~VPO_CLEANCHK; continue; } n = vm_object_page_collect_flush(object, p, curgeneration, pagerflags); if (n == 0) goto rescan; if (object->generation != curgeneration) goto rescan; /* * Try to optimize the next page. If we can't we pick up * our (random) scan where we left off. */ if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) { if ((p = vm_page_lookup(object, pi + n)) != NULL) goto again; } } vm_page_unlock_queues(); #if 0 VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc); #endif vm_object_clear_flag(object, OBJ_CLEANING); return; } static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags) { int runlen; int maxf; int chkb; int maxb; int i; vm_pindex_t pi; vm_page_t maf[vm_pageout_page_count]; vm_page_t mab[vm_pageout_page_count]; vm_page_t ma[vm_pageout_page_count]; mtx_assert(&vm_page_queue_mtx, MA_OWNED); pi = p->pindex; while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) { vm_page_lock_queues(); if (object->generation != curgeneration) { return(0); } } maxf = 0; for(i = 1; i < vm_pageout_page_count; i++) { vm_page_t tp; if ((tp = vm_page_lookup(object, pi + i)) != NULL) { if ((tp->oflags & VPO_BUSY) || ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 && (tp->oflags & VPO_CLEANCHK) == 0) || (tp->busy != 0)) break; vm_page_test_dirty(tp); if ((tp->dirty & tp->valid) == 0) { tp->oflags &= ~VPO_CLEANCHK; break; } maf[ i - 1 ] = tp; maxf++; continue; } break; } maxb = 0; chkb = vm_pageout_page_count - maxf; if (chkb) { for(i = 1; i < chkb;i++) { vm_page_t tp; if ((tp = vm_page_lookup(object, pi - i)) != NULL) { if ((tp->oflags & VPO_BUSY) || ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 && (tp->oflags & VPO_CLEANCHK) == 0) || (tp->busy != 0)) break; vm_page_test_dirty(tp); if ((tp->dirty & tp->valid) == 0) { tp->oflags &= ~VPO_CLEANCHK; break; } mab[ i - 1 ] = tp; maxb++; continue; } break; } } for(i = 0; i < maxb; i++) { int index = (maxb - i) - 1; ma[index] = mab[i]; ma[index]->oflags &= ~VPO_CLEANCHK; } p->oflags &= ~VPO_CLEANCHK; ma[maxb] = p; for(i = 0; i < maxf; i++) { int index = (maxb + i) + 1; ma[index] = maf[i]; ma[index]->oflags &= ~VPO_CLEANCHK; } runlen = maxb + maxf + 1; vm_pageout_flush(ma, runlen, pagerflags); for (i = 0; i < runlen; i++) { if (ma[i]->valid & ma[i]->dirty) { pmap_remove_write(ma[i]); ma[i]->oflags |= VPO_CLEANCHK; /* * maxf will end up being the actual number of pages * we wrote out contiguously, 
non-inclusive of the * first page. We do not count look-behind pages. */ if (i >= maxb + 1 && (maxf > i - maxb - 1)) maxf = i - maxb - 1; } } return(maxf + 1); } /* * Note that there is absolutely no sense in writing out * anonymous objects, so we track down the vnode object * to write out. * We invalidate (remove) all pages from the address space * for semantic correctness. * * Note: certain anonymous maps, such as MAP_NOSYNC maps, * may start out with a NULL object. */ void vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, boolean_t syncio, boolean_t invalidate) { vm_object_t backing_object; struct vnode *vp; struct mount *mp; int flags; if (object == NULL) return; VM_OBJECT_LOCK(object); while ((backing_object = object->backing_object) != NULL) { VM_OBJECT_LOCK(backing_object); offset += object->backing_object_offset; VM_OBJECT_UNLOCK(object); object = backing_object; if (object->size < OFF_TO_IDX(offset + size)) size = IDX_TO_OFF(object->size) - offset; } /* * Flush pages if writing is allowed, invalidate them * if invalidation requested. Pages undergoing I/O * will be ignored by vm_object_page_remove(). * * We cannot lock the vnode and then wait for paging * to complete without deadlocking against vm_fault. * Instead we simply call vm_object_page_remove() and * allow it to block internally on a page-by-page * basis when it encounters pages undergoing async * I/O. */ if (object->type == OBJT_VNODE && (object->flags & OBJ_MIGHTBEDIRTY) != 0) { int vfslocked; vp = object->handle; VM_OBJECT_UNLOCK(object); (void) vn_start_write(vp, &mp, V_WAIT); vfslocked = VFS_LOCK_GIANT(vp->v_mount); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread); flags = (syncio || invalidate) ? OBJPC_SYNC : 0; flags |= invalidate ? OBJPC_INVAL : 0; VM_OBJECT_LOCK(object); vm_object_page_clean(object, OFF_TO_IDX(offset), OFF_TO_IDX(offset + size + PAGE_MASK), flags); VM_OBJECT_UNLOCK(object); VOP_UNLOCK(vp, 0, curthread); VFS_UNLOCK_GIANT(vfslocked); vn_finished_write(mp); VM_OBJECT_LOCK(object); } if ((object->type == OBJT_VNODE || object->type == OBJT_DEVICE) && invalidate) { boolean_t purge; purge = old_msync || (object->type == OBJT_DEVICE); vm_object_page_remove(object, OFF_TO_IDX(offset), OFF_TO_IDX(offset + size + PAGE_MASK), purge ? FALSE : TRUE); } VM_OBJECT_UNLOCK(object); } /* * vm_object_madvise: * * Implements the madvise function at the object/page level. * * MADV_WILLNEED (any object) * * Activate the specified pages if they are resident. * * MADV_DONTNEED (any object) * * Deactivate the specified pages if they are resident. * * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, * OBJ_ONEMAPPING only) * * Deactivate and clean the specified pages if they are * resident. This permits the process to reuse the pages * without faulting or the kernel to reclaim the pages * without I/O. */ void vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise) { vm_pindex_t end, tpindex; vm_object_t backing_object, tobject; vm_page_t m; if (object == NULL) return; VM_OBJECT_LOCK(object); end = pindex + count; /* * Locate and adjust resident pages */ for (; pindex < end; pindex += 1) { relookup: tobject = object; tpindex = pindex; shadowlookup: /* * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages * and those pages must be OBJ_ONEMAPPING. 
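The three advice classes described above are the backend of madvise(2); a minimal userland sketch of the corresponding calls. The buffer size is arbitrary, and MADV_FREE is applied to a private anonymous mapping, matching the OBJT_DEFAULT/OBJT_SWAP and OBJ_ONEMAPPING restriction:

#include <sys/mman.h>
#include <err.h>
#include <string.h>

int
main(void)
{
	size_t len = 16 * 4096;
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE,
	    -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	memset(p, 1, len);		/* fault the pages in */
	madvise(p, len, MADV_WILLNEED);	/* activate resident pages */
	madvise(p, len, MADV_DONTNEED);	/* deactivate them */
	madvise(p, len, MADV_FREE);	/* contents may now be discarded */
	return (0);
}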
*/ if (advise == MADV_FREE) { if ((tobject->type != OBJT_DEFAULT && tobject->type != OBJT_SWAP) || (tobject->flags & OBJ_ONEMAPPING) == 0) { goto unlock_tobject; } } m = vm_page_lookup(tobject, tpindex); if (m == NULL && advise == MADV_WILLNEED) { /* * If the page is cached, reactivate it. */ m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED | VM_ALLOC_NOBUSY); } if (m == NULL) { /* * There may be swap even if there is no backing page */ if (advise == MADV_FREE && tobject->type == OBJT_SWAP) swap_pager_freespace(tobject, tpindex, 1); /* * next object */ backing_object = tobject->backing_object; if (backing_object == NULL) goto unlock_tobject; VM_OBJECT_LOCK(backing_object); tpindex += OFF_TO_IDX(tobject->backing_object_offset); if (tobject != object) VM_OBJECT_UNLOCK(tobject); tobject = backing_object; goto shadowlookup; } /* * If the page is busy or not in a normal active state, * we skip it. If the page is not managed there are no * page queues to mess with. Things can break if we mess * with pages in any of the below states. */ vm_page_lock_queues(); if (m->hold_count || m->wire_count || (m->flags & PG_UNMANAGED) || m->valid != VM_PAGE_BITS_ALL) { vm_page_unlock_queues(); goto unlock_tobject; } if ((m->oflags & VPO_BUSY) || m->busy) { vm_page_flag_set(m, PG_REFERENCED); vm_page_unlock_queues(); if (object != tobject) VM_OBJECT_UNLOCK(object); m->oflags |= VPO_WANTED; msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo", 0); VM_OBJECT_LOCK(object); goto relookup; } if (advise == MADV_WILLNEED) { vm_page_activate(m); } else if (advise == MADV_DONTNEED) { vm_page_dontneed(m); } else if (advise == MADV_FREE) { /* * Mark the page clean. This will allow the page * to be freed up by the system. However, such pages * are often reused quickly by malloc()/free() * so we do not do anything that would cause * a page fault if we can help it. * * Specifically, we do not try to actually free * the page now nor do we try to put it in the * cache (which would cause a page fault on reuse). * * But we do make the page as freeable as we * can without actually taking the step of unmapping * it. */ pmap_clear_modify(m); m->dirty = 0; m->act_count = 0; vm_page_dontneed(m); } vm_page_unlock_queues(); if (advise == MADV_FREE && tobject->type == OBJT_SWAP) swap_pager_freespace(tobject, tpindex, 1); unlock_tobject: if (tobject != object) VM_OBJECT_UNLOCK(tobject); } VM_OBJECT_UNLOCK(object); } /* * vm_object_shadow: * * Create a new object which is backed by the * specified existing object range. The source * object reference is deallocated. * * The new object and offset into that object * are returned in the source parameters. */ void vm_object_shadow( vm_object_t *object, /* IN/OUT */ vm_ooffset_t *offset, /* IN/OUT */ vm_size_t length) { vm_object_t source; vm_object_t result; source = *object; /* * Don't create the new object if the old object isn't shared. */ if (source != NULL) { VM_OBJECT_LOCK(source); if (source->ref_count == 1 && source->handle == NULL && (source->type == OBJT_DEFAULT || source->type == OBJT_SWAP)) { VM_OBJECT_UNLOCK(source); return; } VM_OBJECT_UNLOCK(source); } /* * Allocate a new object with the given length. */ result = vm_object_allocate(OBJT_DEFAULT, length); /* * The new object shadows the source object, adding a reference to it. * Our caller changes his reference to point to the new object, * removing a reference to the source object. Net result: no change * of reference count.
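When superpage reservations are configured, the shadow creation below also assigns the new object a page color, derived from the source's color plus the mapping offset and truncated to the maximum free-list order. A worked example of that arithmetic; the order value 13 is an assumption borrowed from amd64-era defaults, so substitute the platform's real VM_NFREEORDER:

#include <stdio.h>

#define TOY_NFREEORDER	13	/* assumed; check the platform's VM_NFREEORDER */
#define PAGE_SHIFT	12	/* 4 KB pages assumed */

/* Mirrors: (source->pg_color + OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER - 1)) - 1) */
static unsigned int
shadow_color(unsigned int src_color, unsigned long long offset)
{
	unsigned int mask = (1u << (TOY_NFREEORDER - 1)) - 1;

	return ((src_color + (unsigned int)(offset >> PAGE_SHIFT)) & mask);
}

int
main(void)
{
	/*
	 * A shadow created 8 pages into a source colored 5 gets color 13,
	 * so page n of the shadow keeps the physical alignment that page
	 * n + 8 had underneath the source.
	 */
	printf("%u\n", shadow_color(5, 8ULL << PAGE_SHIFT));
	return (0);
}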
* * Try to optimize the result object's page color when shadowing * in order to maintain page coloring consistency in the combined * shadowed object. */ result->backing_object = source; /* * Store the offset into the source object, and fix up the offset into * the new object. */ result->backing_object_offset = *offset; if (source != NULL) { VM_OBJECT_LOCK(source); LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list); source->shadow_count++; source->generation++; +#if VM_NRESERVLEVEL > 0 + result->flags |= source->flags & (OBJ_NEEDGIANT | OBJ_COLORED); + result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & + ((1 << (VM_NFREEORDER - 1)) - 1); +#else result->flags |= source->flags & OBJ_NEEDGIANT; +#endif VM_OBJECT_UNLOCK(source); } /* * Return the new things */ *offset = 0; *object = result; } /* * vm_object_split: * * Split the pages in a map entry into a new object. This affords * easier removal of unused pages, and keeps object inheritance from * being a negative impact on memory usage. */ void vm_object_split(vm_map_entry_t entry) { vm_page_t m, m_next; vm_object_t orig_object, new_object, source; vm_pindex_t idx, offidxstart; vm_size_t size; orig_object = entry->object.vm_object; if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP) return; if (orig_object->ref_count <= 1) return; VM_OBJECT_UNLOCK(orig_object); offidxstart = OFF_TO_IDX(entry->offset); size = atop(entry->end - entry->start); /* * If swap_pager_copy() is later called, it will convert new_object * into a swap object. */ new_object = vm_object_allocate(OBJT_DEFAULT, size); /* * At this point, the new object is still private, so the order in * which the original and new objects are locked does not matter. */ VM_OBJECT_LOCK(new_object); VM_OBJECT_LOCK(orig_object); source = orig_object->backing_object; if (source != NULL) { VM_OBJECT_LOCK(source); if ((source->flags & OBJ_DEAD) != 0) { VM_OBJECT_UNLOCK(source); VM_OBJECT_UNLOCK(orig_object); VM_OBJECT_UNLOCK(new_object); vm_object_deallocate(new_object); VM_OBJECT_LOCK(orig_object); return; } LIST_INSERT_HEAD(&source->shadow_head, new_object, shadow_list); source->shadow_count++; source->generation++; vm_object_reference_locked(source); /* for new_object */ vm_object_clear_flag(source, OBJ_ONEMAPPING); VM_OBJECT_UNLOCK(source); new_object->backing_object_offset = orig_object->backing_object_offset + entry->offset; new_object->backing_object = source; } new_object->flags |= orig_object->flags & OBJ_NEEDGIANT; retry: if ((m = TAILQ_FIRST(&orig_object->memq)) != NULL) { if (m->pindex < offidxstart) { m = vm_page_splay(offidxstart, orig_object->root); if ((orig_object->root = m)->pindex < offidxstart) m = TAILQ_NEXT(m, listq); } } vm_page_lock_queues(); for (; m != NULL && (idx = m->pindex - offidxstart) < size; m = m_next) { m_next = TAILQ_NEXT(m, listq); /* * We must wait for pending I/O to complete before we can * rename the page. * * We do not have to VM_PROT_NONE the page as mappings should * not be changed by this operation. 
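The wait that follows is the standard busy-page handshake: the waiter advertises interest with VPO_WANTED and sleeps on the page, and whoever clears the busy state issues the wakeup. A userland analogue of that handshake using pthreads; the flags and names are toys, not the kernel's:

#include <pthread.h>
#include <stdio.h>

/* Toy busy-page interlock: BUSY blocks access, WANTED records waiters. */
#define TOY_BUSY	0x1
#define TOY_WANTED	0x2

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int oflags = TOY_BUSY;		/* the page starts out busy */

static void *
unbusy(void *arg)
{
	pthread_mutex_lock(&lock);
	oflags &= ~TOY_BUSY;
	if (oflags & TOY_WANTED) {	/* like vm_page_flash() */
		oflags &= ~TOY_WANTED;
		pthread_cond_broadcast(&cv);
	}
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);
	pthread_create(&t, NULL, unbusy, NULL);
	while (oflags & TOY_BUSY) {	/* like the msleep()-and-retry loop */
		oflags |= TOY_WANTED;
		pthread_cond_wait(&cv, &lock);
	}
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("page no longer busy\n");
	return (0);
}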
*/ if ((m->oflags & VPO_BUSY) || m->busy) { vm_page_flag_set(m, PG_REFERENCED); vm_page_unlock_queues(); VM_OBJECT_UNLOCK(new_object); m->oflags |= VPO_WANTED; msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0); VM_OBJECT_LOCK(new_object); goto retry; } vm_page_rename(m, new_object, idx); /* page automatically made dirty by rename and cache handled */ vm_page_busy(m); } vm_page_unlock_queues(); if (orig_object->type == OBJT_SWAP) { /* * swap_pager_copy() can sleep, in which case the orig_object's * and new_object's locks are released and reacquired. */ swap_pager_copy(orig_object, new_object, offidxstart, 0); /* * Transfer any cached pages from orig_object to new_object. */ if (__predict_false(orig_object->cache != NULL)) vm_page_cache_transfer(orig_object, offidxstart, new_object); } VM_OBJECT_UNLOCK(orig_object); TAILQ_FOREACH(m, &new_object->memq, listq) vm_page_wakeup(m); VM_OBJECT_UNLOCK(new_object); entry->object.vm_object = new_object; entry->offset = 0LL; vm_object_deallocate(orig_object); VM_OBJECT_LOCK(new_object); } #define OBSC_TEST_ALL_SHADOWED 0x0001 #define OBSC_COLLAPSE_NOWAIT 0x0002 #define OBSC_COLLAPSE_WAIT 0x0004 static int vm_object_backing_scan(vm_object_t object, int op) { int r = 1; vm_page_t p; vm_object_t backing_object; vm_pindex_t backing_offset_index; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED); backing_object = object->backing_object; backing_offset_index = OFF_TO_IDX(object->backing_object_offset); /* * Initial conditions */ if (op & OBSC_TEST_ALL_SHADOWED) { /* * We do not want to have to test for the existence of cache * or swap pages in the backing object. XXX but with the * new swapper this would be pretty easy to do. * * XXX what about anonymous MAP_SHARED memory that hasn't * been ZFOD faulted yet? If we do not test for this, the * shadow test may succeed! XXX */ if (backing_object->type != OBJT_DEFAULT) { return (0); } } if (op & OBSC_COLLAPSE_WAIT) { vm_object_set_flag(backing_object, OBJ_DEAD); } /* * Our scan */ p = TAILQ_FIRST(&backing_object->memq); while (p) { vm_page_t next = TAILQ_NEXT(p, listq); vm_pindex_t new_pindex = p->pindex - backing_offset_index; if (op & OBSC_TEST_ALL_SHADOWED) { vm_page_t pp; /* * Ignore pages outside the parent object's range * and outside the parent object's mapping of the * backing object. * * note that we do not busy the backing object's * page. */ if ( p->pindex < backing_offset_index || new_pindex >= object->size ) { p = next; continue; } /* * See if the parent has the page or if the parent's * object pager has the page. If the parent has the * page but the page is not valid, the parent's * object pager must have the page. * * If this fails, the parent does not completely shadow * the object and we might as well give up now. 
*/ pp = vm_page_lookup(object, new_pindex); if ( (pp == NULL || pp->valid == 0) && !vm_pager_has_page(object, new_pindex, NULL, NULL) ) { r = 0; break; } } /* * Check for busy page */ if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) { vm_page_t pp; if (op & OBSC_COLLAPSE_NOWAIT) { if ((p->oflags & VPO_BUSY) || !p->valid || p->busy) { p = next; continue; } } else if (op & OBSC_COLLAPSE_WAIT) { if ((p->oflags & VPO_BUSY) || p->busy) { vm_page_lock_queues(); vm_page_flag_set(p, PG_REFERENCED); vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); p->oflags |= VPO_WANTED; msleep(p, VM_OBJECT_MTX(backing_object), PDROP | PVM, "vmocol", 0); VM_OBJECT_LOCK(object); VM_OBJECT_LOCK(backing_object); /* * If we slept, anything could have * happened. Since the object is * marked dead, the backing offset * should not have changed so we * just restart our scan. */ p = TAILQ_FIRST(&backing_object->memq); continue; } } KASSERT( p->object == backing_object, ("vm_object_backing_scan: object mismatch") ); /* * Destroy any associated swap */ if (backing_object->type == OBJT_SWAP) { swap_pager_freespace( backing_object, p->pindex, 1 ); } if ( p->pindex < backing_offset_index || new_pindex >= object->size ) { /* * Page is out of the parent object's range, we * can simply destroy it. */ vm_page_lock_queues(); KASSERT(!pmap_page_is_mapped(p), ("freeing mapped page %p", p)); if (p->wire_count == 0) vm_page_free(p); else vm_page_remove(p); vm_page_unlock_queues(); p = next; continue; } pp = vm_page_lookup(object, new_pindex); if ( pp != NULL || vm_pager_has_page(object, new_pindex, NULL, NULL) ) { /* * page already exists in parent OR swap exists * for this location in the parent. Destroy * the original page from the backing object. * * Leave the parent's page alone */ vm_page_lock_queues(); KASSERT(!pmap_page_is_mapped(p), ("freeing mapped page %p", p)); if (p->wire_count == 0) vm_page_free(p); else vm_page_remove(p); vm_page_unlock_queues(); p = next; continue; } +#if VM_NRESERVLEVEL > 0 /* + * Rename the reservation. + */ + vm_reserv_rename(p, object, backing_object, + backing_offset_index); +#endif + + /* * Page does not exist in parent, rename the * page from the backing object to the main object. * * If the page was mapped to a process, it can remain * mapped through the rename. */ vm_page_lock_queues(); vm_page_rename(p, object, new_pindex); vm_page_unlock_queues(); /* page automatically made dirty by rename */ } p = next; } return (r); } /* * this version of collapse allows the operation to occur earlier and * when paging_in_progress is true for an object... This is not a complete * operation, but should plug 99.9% of the rest of the leaks. */ static void vm_object_qcollapse(vm_object_t object) { vm_object_t backing_object = object->backing_object; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED); if (backing_object->ref_count != 1) return; vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT); } /* * vm_object_collapse: * * Collapse an object with the object backing it. * Pages in the backing object are moved into the * parent, and the backing object is deallocated. */ void vm_object_collapse(vm_object_t object) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); while (TRUE) { vm_object_t backing_object; /* * Verify that the conditions are right for collapse: * * The object exists and the backing object exists. */ if ((backing_object = object->backing_object) == NULL) break; /* * we check the backing object first, because it is most likely * not collapsable. 
*/ VM_OBJECT_LOCK(backing_object); if (backing_object->handle != NULL || (backing_object->type != OBJT_DEFAULT && backing_object->type != OBJT_SWAP) || (backing_object->flags & OBJ_DEAD) || object->handle != NULL || (object->type != OBJT_DEFAULT && object->type != OBJT_SWAP) || (object->flags & OBJ_DEAD)) { VM_OBJECT_UNLOCK(backing_object); break; } if ( object->paging_in_progress != 0 || backing_object->paging_in_progress != 0 ) { vm_object_qcollapse(object); VM_OBJECT_UNLOCK(backing_object); break; } /* * We know that we can either collapse the backing object (if * the parent is the only reference to it) or (perhaps) have * the parent bypass the object if the parent happens to shadow * all the resident pages in the entire backing object. * * This is ignoring pager-backed pages such as swap pages. * vm_object_backing_scan fails the shadowing test in this * case. */ if (backing_object->ref_count == 1) { /* * If there is exactly one reference to the backing * object, we can collapse it into the parent. */ vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT); + +#if VM_NRESERVLEVEL > 0 + /* + * Break any reservations from backing_object. + */ + if (__predict_false(!LIST_EMPTY(&backing_object->rvq))) + vm_reserv_break_all(backing_object); +#endif /* * Move the pager from backing_object to object. */ if (backing_object->type == OBJT_SWAP) { /* * swap_pager_copy() can sleep, in which case * the backing_object's and object's locks are * released and reacquired. */ swap_pager_copy( backing_object, object, OFF_TO_IDX(object->backing_object_offset), TRUE); /* * Free any cached pages from backing_object. */ if (__predict_false(backing_object->cache != NULL)) vm_page_cache_free(backing_object, 0, 0); } /* * Object now shadows whatever backing_object did. * Note that the reference to * backing_object->backing_object moves from within * backing_object to within object. */ LIST_REMOVE(object, shadow_list); backing_object->shadow_count--; backing_object->generation++; if (backing_object->backing_object) { VM_OBJECT_LOCK(backing_object->backing_object); LIST_REMOVE(backing_object, shadow_list); LIST_INSERT_HEAD( &backing_object->backing_object->shadow_head, object, shadow_list); /* * The shadow_count has not changed. */ backing_object->backing_object->generation++; VM_OBJECT_UNLOCK(backing_object->backing_object); } object->backing_object = backing_object->backing_object; object->backing_object_offset += backing_object->backing_object_offset; /* * Discard backing_object. * * Since the backing object has no pages, no pager left, * and no object references within it, all that is * necessary is to dispose of it. */ KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object)); VM_OBJECT_UNLOCK(backing_object); mtx_lock(&vm_object_list_mtx); TAILQ_REMOVE( &vm_object_list, backing_object, object_list ); mtx_unlock(&vm_object_list_mtx); uma_zfree(obj_zone, backing_object); object_collapses++; } else { vm_object_t new_backing_object; /* * If we do not entirely shadow the backing object, * there is nothing we can do so we give up. */ if (object->resident_page_count != object->size && vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) { VM_OBJECT_UNLOCK(backing_object); break; } /* * Make the parent shadow the next object in the * chain. Deallocating backing_object will not remove * it, since its reference count is at least 2. 
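Whichever branch is taken, the parent ends up re-parented one level down the chain with the backing offsets accumulated. The pointer surgery of the bypass case, stripped of locking and page movement, in toy form; the types and values are invented for the illustration:

#include <stdio.h>

struct toy_object {
	struct toy_object *backing_object;
	unsigned long backing_object_offset;
	int ref_count;
	const char *name;
};

/* The bypass case: skip one fully shadowed backing object. */
static void
toy_bypass(struct toy_object *object)
{
	struct toy_object *backing = object->backing_object;

	object->backing_object = backing->backing_object;
	if (object->backing_object != NULL) {
		object->backing_object->ref_count++;	/* new shadow of it */
		object->backing_object_offset +=
		    backing->backing_object_offset;
	}
	backing->ref_count--;	/* ref_count was >= 2, so it survives */
}

int
main(void)
{
	struct toy_object c = { NULL, 0, 1, "c" };
	struct toy_object b = { &c, 0x2000, 2, "b" };
	struct toy_object a = { &b, 0x1000, 1, "a" };

	toy_bypass(&a);
	printf("%s +0x%lx (b refs: %d)\n", a.backing_object->name,
	    a.backing_object_offset, b.ref_count);	/* c +0x3000 (b refs: 1) */
	return (0);
}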
*/ LIST_REMOVE(object, shadow_list); backing_object->shadow_count--; backing_object->generation++; new_backing_object = backing_object->backing_object; if ((object->backing_object = new_backing_object) != NULL) { VM_OBJECT_LOCK(new_backing_object); LIST_INSERT_HEAD( &new_backing_object->shadow_head, object, shadow_list ); new_backing_object->shadow_count++; new_backing_object->generation++; vm_object_reference_locked(new_backing_object); VM_OBJECT_UNLOCK(new_backing_object); object->backing_object_offset += backing_object->backing_object_offset; } /* * Drop the reference count on backing_object. Since * its ref_count was at least 2, it will not vanish. */ backing_object->ref_count--; VM_OBJECT_UNLOCK(backing_object); object_bypasses++; } /* * Try again with this object's new backing object. */ } } /* * vm_object_page_remove: * * Removes all physical pages in the given range from the * object's list of pages. If the range's end is zero, all * physical pages from the range's start to the end of the object * are deleted. * * The object must be locked. */ void vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, boolean_t clean_only) { vm_page_t p, next; int wirings; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); if (object->resident_page_count == 0) goto skipmemq; /* * Since physically-backed objects do not use managed pages, we can't * remove pages from the object (we must instead remove the page * references, and then destroy the object). */ KASSERT(object->type != OBJT_PHYS || object == kernel_object || object == kmem_object, ("attempt to remove pages from a physical object")); vm_object_pip_add(object, 1); again: vm_page_lock_queues(); if ((p = TAILQ_FIRST(&object->memq)) != NULL) { if (p->pindex < start) { p = vm_page_splay(start, object->root); if ((object->root = p)->pindex < start) p = TAILQ_NEXT(p, listq); } } /* * Assert: the variable p is either (1) the page with the * least pindex greater than or equal to the parameter pindex * or (2) NULL. */ for (; p != NULL && (p->pindex < end || end == 0); p = next) { next = TAILQ_NEXT(p, listq); /* * If the page is wired for any reason besides the * existence of managed, wired mappings, then it cannot * be freed. */ if ((wirings = p->wire_count) != 0 && (wirings = pmap_page_wired_mappings(p)) != p->wire_count) { pmap_remove_all(p); /* Account for removal of managed, wired mappings. */ p->wire_count -= wirings; if (!clean_only) p->valid = 0; continue; } if (vm_page_sleep_if_busy(p, TRUE, "vmopar")) goto again; if (clean_only && p->valid) { pmap_remove_write(p); if (p->valid & p->dirty) continue; } pmap_remove_all(p); /* Account for removal of managed, wired mappings. */ if (wirings != 0) p->wire_count -= wirings; vm_page_free(p); } vm_page_unlock_queues(); vm_object_pip_wakeup(object); skipmemq: if (__predict_false(object->cache != NULL)) vm_page_cache_free(object, start, end); } /* * Routine: vm_object_coalesce * Function: Coalesces two objects backing up adjoining * regions of memory into a single object. * * returns TRUE if objects were combined. * * NOTE: Only works at the moment if the second object is NULL - * if it's not, which object do we lock first? * * Parameters: * prev_object First object to coalesce * prev_offset Offset into prev_object * prev_size Size of reference to prev_object * next_size Size of reference to the second object * * Conditions: * The object must *not* be locked. 
*/ boolean_t vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, vm_size_t prev_size, vm_size_t next_size) { vm_pindex_t next_pindex; if (prev_object == NULL) return (TRUE); VM_OBJECT_LOCK(prev_object); if (prev_object->type != OBJT_DEFAULT && prev_object->type != OBJT_SWAP) { VM_OBJECT_UNLOCK(prev_object); return (FALSE); } /* * Try to collapse the object first */ vm_object_collapse(prev_object); /* * Can't coalesce if: . more than one reference . paged out . shadows * another object . has a copy elsewhere (any of which mean that the * pages not mapped to prev_entry may be in use anyway) */ if (prev_object->backing_object != NULL) { VM_OBJECT_UNLOCK(prev_object); return (FALSE); } prev_size >>= PAGE_SHIFT; next_size >>= PAGE_SHIFT; next_pindex = OFF_TO_IDX(prev_offset) + prev_size; if ((prev_object->ref_count > 1) && (prev_object->size != next_pindex)) { VM_OBJECT_UNLOCK(prev_object); return (FALSE); } /* * Remove any pages that may still be in the object from a previous * deallocation. */ if (next_pindex < prev_object->size) { vm_object_page_remove(prev_object, next_pindex, next_pindex + next_size, FALSE); if (prev_object->type == OBJT_SWAP) swap_pager_freespace(prev_object, next_pindex, next_size); } /* * Extend the object if necessary. */ if (next_pindex + next_size > prev_object->size) prev_object->size = next_pindex + next_size; VM_OBJECT_UNLOCK(prev_object); return (TRUE); } void vm_object_set_writeable_dirty(vm_object_t object) { struct vnode *vp; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) return; vm_object_set_flag(object, OBJ_MIGHTBEDIRTY); if (object->type == OBJT_VNODE && (vp = (struct vnode *)object->handle) != NULL) { VI_LOCK(vp); vp->v_iflag |= VI_OBJDIRTY; VI_UNLOCK(vp); } } #include "opt_ddb.h" #ifdef DDB #include #include #include static int _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) { vm_map_t tmpm; vm_map_entry_t tmpe; vm_object_t obj; int entcount; if (map == 0) return 0; if (entry == 0) { tmpe = map->header.next; entcount = map->nentries; while (entcount-- && (tmpe != &map->header)) { if (_vm_object_in_map(map, object, tmpe)) { return 1; } tmpe = tmpe->next; } } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { tmpm = entry->object.sub_map; tmpe = tmpm->header.next; entcount = tmpm->nentries; while (entcount-- && tmpe != &tmpm->header) { if (_vm_object_in_map(tmpm, object, tmpe)) { return 1; } tmpe = tmpe->next; } } else if ((obj = entry->object.vm_object) != NULL) { for (; obj; obj = obj->backing_object) if (obj == object) { return 1; } } return 0; } static int vm_object_in_map(vm_object_t object) { struct proc *p; /* sx_slock(&allproc_lock); */ FOREACH_PROC_IN_SYSTEM(p) { if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) continue; if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { /* sx_sunlock(&allproc_lock); */ return 1; } } /* sx_sunlock(&allproc_lock); */ if (_vm_object_in_map(kernel_map, object, 0)) return 1; if (_vm_object_in_map(kmem_map, object, 0)) return 1; if (_vm_object_in_map(pager_map, object, 0)) return 1; if (_vm_object_in_map(buffer_map, object, 0)) return 1; return 0; } DB_SHOW_COMMAND(vmochk, vm_object_check) { vm_object_t object; /* * make sure that internal objs are in a map somewhere * and none have zero ref counts. 
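vm_object_coalesce() above does all of its range work in page indexes: the byte sizes are shifted down by PAGE_SHIFT and the appended run is grafted on at next_pindex, growing the object if needed. A worked example of the index arithmetic, with a PAGE_SHIFT of 12 (4 KB pages) assumed:

#include <stdio.h>

#define PAGE_SHIFT	12
#define OFF_TO_IDX(x)	((unsigned long)(x) >> PAGE_SHIFT)

int
main(void)
{
	unsigned long prev_offset = 0x10000;	/* mapping starts 16 pages in */
	unsigned long prev_size = 0x8000;	/* 8 pages currently mapped */
	unsigned long next_size = 0x4000;	/* 4 pages being appended */
	unsigned long obj_size = 24;		/* object size, in pages */
	unsigned long next_pindex;

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;	/* 16 + 8 = 24 */

	/* Extend the object if the appended run goes past its end. */
	if (next_pindex + next_size > obj_size)
		obj_size = next_pindex + next_size;
	printf("next_pindex=%lu new size=%lu\n", next_pindex, obj_size); /* 24, 28 */
	return (0);
}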
*/ TAILQ_FOREACH(object, &vm_object_list, object_list) { if (object->handle == NULL && (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { if (object->ref_count == 0) { db_printf("vmochk: internal obj has zero ref count: %ld\n", (long)object->size); } if (!vm_object_in_map(object)) { db_printf( "vmochk: internal obj is not in a map: " "ref: %d, size: %lu: 0x%lx, backing_object: %p\n", object->ref_count, (u_long)object->size, (u_long)object->size, (void *)object->backing_object); } } } } /* * vm_object_print: [ debug ] */ DB_SHOW_COMMAND(object, vm_object_print_static) { /* XXX convert args. */ vm_object_t object = (vm_object_t)addr; boolean_t full = have_addr; vm_page_t p; /* XXX count is an (unused) arg. Avoid shadowing it. */ #define count was_count int count; if (object == NULL) return; db_iprintf( "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n", object, (int)object->type, (uintmax_t)object->size, object->resident_page_count, object->ref_count, object->flags); db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n", object->shadow_count, object->backing_object ? object->backing_object->ref_count : 0, object->backing_object, (uintmax_t)object->backing_object_offset); if (!full) return; db_indent += 2; count = 0; TAILQ_FOREACH(p, &object->memq, listq) { if (count == 0) db_iprintf("memory:="); else if (count == 6) { db_printf("\n"); db_iprintf(" ..."); count = 0; } else db_printf(","); count++; db_printf("(off=0x%jx,page=0x%jx)", (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); } if (count != 0) db_printf("\n"); db_indent -= 2; } /* XXX. */ #undef count /* XXX need this non-static entry for calling from vm_map_print. */ void vm_object_print( /* db_expr_t */ long addr, boolean_t have_addr, /* db_expr_t */ long count, char *modif) { vm_object_print_static(addr, have_addr, count, modif); } DB_SHOW_COMMAND(vmopag, vm_object_print_pages) { vm_object_t object; int nl = 0; int c; TAILQ_FOREACH(object, &vm_object_list, object_list) { vm_pindex_t idx, fidx; vm_pindex_t osize; vm_paddr_t pa = -1; int rcount; vm_page_t m; db_printf("new object: %p\n", (void *)object); if (nl > 18) { c = cngetc(); if (c != ' ') return; nl = 0; } nl++; rcount = 0; fidx = 0; osize = object->size; if (osize > 128) osize = 128; for (idx = 0; idx < osize; idx++) { m = vm_page_lookup(object, idx); if (m == NULL) { if (rcount) { db_printf(" index(%ld)run(%d)pa(0x%lx)\n", (long)fidx, rcount, (long)pa); if (nl > 18) { c = cngetc(); if (c != ' ') return; nl = 0; } nl++; rcount = 0; } continue; } if (rcount && (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { ++rcount; continue; } if (rcount) { db_printf(" index(%ld)run(%d)pa(0x%lx)\n", (long)fidx, rcount, (long)pa); if (nl > 18) { c = cngetc(); if (c != ' ') return; nl = 0; } nl++; } fidx = idx; pa = VM_PAGE_TO_PHYS(m); rcount = 1; } if (rcount) { db_printf(" index(%ld)run(%d)pa(0x%lx)\n", (long)fidx, rcount, (long)pa); if (nl > 18) { c = cngetc(); if (c != ' ') return; nl = 0; } nl++; } } } #endif /* DDB */ Index: head/sys/vm/vm_page.c =================================================================== --- head/sys/vm/vm_page.c (revision 174981) +++ head/sys/vm/vm_page.c (revision 174982) @@ -1,2027 +1,2072 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 */ /*- * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Authors: Avadis Tevanian, Jr., Michael Wayne Young * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* * GENERAL RULES ON VM_PAGE MANIPULATION * * - a pageq mutex is required when adding or removing a page from a * page queue (vm_page_queue[]), regardless of other mutexes or the * busy state of a page. * * - a hash chain mutex is required when associating or disassociating * a page from the VM PAGE CACHE hash table (vm_page_buckets), * regardless of other mutexes or the busy state of a page. * * - either a hash chain mutex OR a busied page is required in order * to modify the page flags. A hash chain mutex must be obtained in * order to busy a page. A page's flags cannot be modified by a * hash chain mutex if the page is marked busy. * * - The object memq mutex is held when inserting or removing * pages from an object (vm_page_insert() or vm_page_remove()). This * is different from the object's main mutex. * * Generally speaking, you have to be aware of side effects when running * vm_page ops. A vm_page_lookup() will return with the hash chain * locked, whether it was able to lookup the page or not. 
vm_page_free(), * vm_page_cache(), vm_page_activate(), and a number of other routines * will release the hash chain mutex for you. Intermediate manipulation * routines such as vm_page_flag_set() expect the hash chain to be held * on entry and the hash chain will remain held on return. * * pageq scanning can only occur with the pageq in question locked. * We have a known bottleneck with the active queue, but the cache * and free queues are actually arrays already. */ /* * Resident memory management module. */ #include __FBSDID("$FreeBSD$"); +#include "opt_vm.h" + #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include /* * Associated with page of user-allocatable memory is a * page structure. */ struct mtx vm_page_queue_mtx; struct mtx vm_page_queue_free_mtx; vm_page_t vm_page_array = 0; int vm_page_array_size = 0; long first_page = 0; int vm_page_zero_count = 0; static int boot_pages = UMA_BOOT_PAGES; TUNABLE_INT("vm.boot_pages", &boot_pages); SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0, "number of pages allocated for bootstrapping the VM system"); /* * vm_set_page_size: * * Sets the page size, perhaps based upon the memory * size. Must be called before any use of page-size * dependent functions. */ void vm_set_page_size(void) { if (cnt.v_page_size == 0) cnt.v_page_size = PAGE_SIZE; if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0) panic("vm_set_page_size: page size not a power of two"); } /* * vm_page_blacklist_lookup: * * See if a physical address in this page has been listed * in the blacklist tunable. Entries in the tunable are * separated by spaces or commas. If an invalid integer is * encountered then the rest of the string is skipped. */ static int vm_page_blacklist_lookup(char *list, vm_paddr_t pa) { vm_paddr_t bad; char *cp, *pos; for (pos = list; *pos != '\0'; pos = cp) { bad = strtoq(pos, &cp, 0); if (*cp != '\0') { if (*cp == ' ' || *cp == ',') { cp++; if (cp == pos) continue; } else break; } if (pa == trunc_page(bad)) return (1); } return (0); } /* * vm_page_startup: * * Initializes the resident memory module. * * Allocates memory for the page cells, and * for the object/offset-to-page hash table headers. * Each page cell is initialized and placed on the free list. */ vm_offset_t vm_page_startup(vm_offset_t vaddr) { vm_offset_t mapped; vm_paddr_t page_range; vm_paddr_t new_end; int i; vm_paddr_t pa; int nblocks; vm_paddr_t last_pa; char *list; /* the biggest memory array is the second group of pages */ vm_paddr_t end; vm_paddr_t biggestsize; vm_paddr_t low_water, high_water; int biggestone; biggestsize = 0; biggestone = 0; nblocks = 0; vaddr = round_page(vaddr); for (i = 0; phys_avail[i + 1]; i += 2) { phys_avail[i] = round_page(phys_avail[i]); phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); } low_water = phys_avail[0]; high_water = phys_avail[1]; for (i = 0; phys_avail[i + 1]; i += 2) { vm_paddr_t size = phys_avail[i + 1] - phys_avail[i]; if (size > biggestsize) { biggestone = i; biggestsize = size; } if (phys_avail[i] < low_water) low_water = phys_avail[i]; if (phys_avail[i + 1] > high_water) high_water = phys_avail[i + 1]; ++nblocks; } end = phys_avail[biggestone+1]; /* * Initialize the locks. 
*/ mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF | MTX_RECURSE); mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL, MTX_DEF); /* * Initialize the queue headers for the hold queue, the active queue, * and the inactive queue. */ vm_pageq_init(); /* * Allocate memory for use when boot strapping the kernel memory * allocator. */ new_end = end - (boot_pages * UMA_SLAB_SIZE); new_end = trunc_page(new_end); mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE); bzero((void *)mapped, end - new_end); uma_startup((void *)mapped, boot_pages); #if defined(__amd64__) || defined(__i386__) /* * Allocate a bitmap to indicate that a random physical page * needs to be included in a minidump. * * The amd64 port needs this to indicate which direct map pages * need to be dumped, via calls to dump_add_page()/dump_drop_page(). * * However, i386 still needs this workspace internally within the * minidump code. In theory, they are not needed on i386, but are * included should the sf_buf code decide to use them. */ page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE; vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY); new_end -= vm_page_dump_size; vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end, new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE); bzero((void *)vm_page_dump, vm_page_dump_size); #endif /* * Compute the number of pages of memory that will be available for * use (taking into account the overhead of a page structure per * page). */ first_page = low_water / PAGE_SIZE; #ifdef VM_PHYSSEG_SPARSE page_range = 0; for (i = 0; phys_avail[i + 1] != 0; i += 2) page_range += atop(phys_avail[i + 1] - phys_avail[i]); #elif defined(VM_PHYSSEG_DENSE) page_range = high_water / PAGE_SIZE - first_page; #else #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined." #endif end = new_end; /* * Reserve an unmapped guard page to trap access to vm_page_array[-1]. */ vaddr += PAGE_SIZE; /* * Initialize the mem entry structures now, and put them in the free * queue. */ new_end = trunc_page(end - page_range * sizeof(struct vm_page)); mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE); vm_page_array = (vm_page_t) mapped; +#if VM_NRESERVLEVEL > 0 + /* + * Allocate memory for the reservation management system's data + * structures. + */ + new_end = vm_reserv_startup(&vaddr, new_end, high_water); +#endif #ifdef __amd64__ /* * pmap_map on amd64 comes out of the direct-map, not kvm like i386, * so the pages must be tracked for a crashdump to include this data. * This includes the vm_page_array and the early UMA bootstrap pages. */ for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE) dump_add_page(pa); #endif phys_avail[biggestone + 1] = new_end; /* * Clear all of the page structures */ bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page)); for (i = 0; i < page_range; i++) vm_page_array[i].order = VM_NFREEORDER; vm_page_array_size = page_range; /* * Initialize the physical memory allocator. */ vm_phys_init(); /* * Add every available physical page that is not blacklisted to * the free lists. 
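The startup loop that follows consults the vm.blacklist tunable, whose format vm_page_blacklist_lookup() parses earlier in this file: physical addresses separated by spaces or commas, with parsing abandoned at the first malformed entry. A userland sketch of the same parse, with strtoul substituted for the kernel's strtoq and a 4 KB page assumed:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_MASK	0xfffUL		/* 4 KB pages assumed */
#define trunc_page(x)	((x) & ~PAGE_MASK)

/* Same shape as vm_page_blacklist_lookup(). */
static int
blacklisted(const char *list, unsigned long pa)
{
	unsigned long bad;
	const char *pos;
	char *cp;

	for (pos = list; *pos != '\0'; pos = cp) {
		bad = strtoul(pos, &cp, 0);
		if (*cp != '\0') {
			if (*cp == ' ' || *cp == ',')
				cp++;		/* step over the separator */
			else
				break;		/* malformed: skip the rest */
		}
		if (pa == trunc_page(bad))
			return (1);
	}
	return (0);
}

int
main(void)
{
	const char *list = "0x7f000,0x9e000 0xa0001";

	printf("%d %d\n", blacklisted(list, 0x9e000),	/* 1 */
	    blacklisted(list, 0x9f000));		/* 0 */
	return (0);
}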
*/ cnt.v_page_count = 0; cnt.v_free_count = 0; list = getenv("vm.blacklist"); for (i = 0; phys_avail[i + 1] != 0; i += 2) { pa = phys_avail[i]; last_pa = phys_avail[i + 1]; while (pa < last_pa) { if (list != NULL && vm_page_blacklist_lookup(list, pa)) printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa); else vm_phys_add_page(pa); pa += PAGE_SIZE; } } freeenv(list); +#if VM_NRESERVLEVEL > 0 + /* + * Initialize the reservation management system. + */ + vm_reserv_init(); +#endif return (vaddr); } void vm_page_flag_set(vm_page_t m, unsigned short bits) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); m->flags |= bits; } void vm_page_flag_clear(vm_page_t m, unsigned short bits) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); m->flags &= ~bits; } void vm_page_busy(vm_page_t m) { VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); KASSERT((m->oflags & VPO_BUSY) == 0, ("vm_page_busy: page already busy!!!")); m->oflags |= VPO_BUSY; } /* * vm_page_flash: * * wakeup anyone waiting for the page. */ void vm_page_flash(vm_page_t m) { VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if (m->oflags & VPO_WANTED) { m->oflags &= ~VPO_WANTED; wakeup(m); } } /* * vm_page_wakeup: * * clear the VPO_BUSY flag and wakeup anyone waiting for the * page. * */ void vm_page_wakeup(vm_page_t m) { VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!")); m->oflags &= ~VPO_BUSY; vm_page_flash(m); } void vm_page_io_start(vm_page_t m) { VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); m->busy++; } void vm_page_io_finish(vm_page_t m) { VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); m->busy--; if (m->busy == 0) vm_page_flash(m); } /* * Keep page from being freed by the page daemon * much of the same effect as wiring, except much lower * overhead and should be used only for *very* temporary * holding ("wiring"). */ void vm_page_hold(vm_page_t mem) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); mem->hold_count++; } void vm_page_unhold(vm_page_t mem) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); --mem->hold_count; KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!")); if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD)) vm_page_free_toq(mem); } /* * vm_page_free: * * Free a page. */ void vm_page_free(vm_page_t m) { m->flags &= ~PG_ZERO; vm_page_free_toq(m); } /* * vm_page_free_zero: * * Free a page to the zerod-pages queue */ void vm_page_free_zero(vm_page_t m) { m->flags |= PG_ZERO; vm_page_free_toq(m); } /* * vm_page_sleep: * * Sleep and release the page queues lock. * * The object containing the given page must be locked. */ void vm_page_sleep(vm_page_t m, const char *msg) { VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if (!mtx_owned(&vm_page_queue_mtx)) vm_page_lock_queues(); vm_page_flag_set(m, PG_REFERENCED); vm_page_unlock_queues(); /* * It's possible that while we sleep, the page will get * unbusied and freed. If we are holding the object * lock, we will assume we hold a reference to the object * such that even if m->object changes, we can re-lock * it. */ m->oflags |= VPO_WANTED; msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0); } /* * vm_page_dirty: * * make page all dirty */ void vm_page_dirty(vm_page_t m) { KASSERT((m->flags & PG_CACHED) == 0, ("vm_page_dirty: page in cache!")); KASSERT(!VM_PAGE_IS_FREE(m), ("vm_page_dirty: page is free!")); m->dirty = VM_PAGE_BITS_ALL; } /* * vm_page_splay: * * Implements Sleator and Tarjan's top-down splay algorithm. Returns * the vm_page containing the given pindex. 
If, however, that * pindex is not found in the vm_object, returns a vm_page that is * adjacent to the pindex, coming before or after it. */ vm_page_t vm_page_splay(vm_pindex_t pindex, vm_page_t root) { struct vm_page dummy; vm_page_t lefttreemax, righttreemin, y; if (root == NULL) return (root); lefttreemax = righttreemin = &dummy; for (;; root = y) { if (pindex < root->pindex) { if ((y = root->left) == NULL) break; if (pindex < y->pindex) { /* Rotate right. */ root->left = y->right; y->right = root; root = y; if ((y = root->left) == NULL) break; } /* Link into the new root's right tree. */ righttreemin->left = root; righttreemin = root; } else if (pindex > root->pindex) { if ((y = root->right) == NULL) break; if (pindex > y->pindex) { /* Rotate left. */ root->right = y->left; y->left = root; root = y; if ((y = root->right) == NULL) break; } /* Link into the new root's left tree. */ lefttreemax->right = root; lefttreemax = root; } else break; } /* Assemble the new root. */ lefttreemax->right = root->left; righttreemin->left = root->right; root->left = dummy.right; root->right = dummy.left; return (root); } /* * vm_page_insert: [ internal use only ] * * Inserts the given mem entry into the object and object list. * * The pagetables are not updated but will presumably fault the page * in if necessary, or if a kernel page the caller will at some point * enter the page into the kernel's pmap. We are not allowed to block * here so we *can't* do this anyway. * * The object and page must be locked. * This routine may not block. */ void vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex) { vm_page_t root; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); if (m->object != NULL) panic("vm_page_insert: page already inserted"); /* * Record the object/offset pair in this page */ m->object = object; m->pindex = pindex; /* * Now link into the object's ordered list of backed pages. */ root = object->root; if (root == NULL) { m->left = NULL; m->right = NULL; TAILQ_INSERT_TAIL(&object->memq, m, listq); } else { root = vm_page_splay(pindex, root); if (pindex < root->pindex) { m->left = root->left; m->right = root; root->left = NULL; TAILQ_INSERT_BEFORE(root, m, listq); } else if (pindex == root->pindex) panic("vm_page_insert: offset already allocated"); else { m->right = root->right; m->left = root; root->right = NULL; TAILQ_INSERT_AFTER(&object->memq, root, m, listq); } } object->root = m; object->generation++; /* * show that the object has one more resident page. */ object->resident_page_count++; /* * Hold the vnode until the last page is released. */ if (object->resident_page_count == 1 && object->type == OBJT_VNODE) vhold((struct vnode *)object->handle); /* * Since we are inserting a new and possibly dirty page, * update the object's OBJ_MIGHTBEDIRTY flag. */ if (m->flags & PG_WRITEABLE) vm_object_set_writeable_dirty(object); } /* * vm_page_remove: * NOTE: used by device pager as well -wfj * * Removes the given mem entry from the object/offset-page * table and the object page list, but do not invalidate/terminate * the backing store. * * The object and page must be locked. * The underlying pmap entry (if any) is NOT removed here. * This routine may not block. */ void vm_page_remove(vm_page_t m) { vm_object_t object; vm_page_t root; if ((object = m->object) == NULL) return; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); if (m->oflags & VPO_BUSY) { m->oflags &= ~VPO_BUSY; vm_page_flash(m); } mtx_assert(&vm_page_queue_mtx, MA_OWNED); /* * Now remove from the object's list of backed pages. 
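vm_page_splay() above is a textbook Sleator-Tarjan top-down splay, and the removal code that follows uses it to bring a neighbor of the departing page to the root so the unlink needs no parent pointers. A self-contained userland version of the same routine, for experimenting with the algorithm outside the kernel; distinct keys are assumed:

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long pindex;
	struct node *left, *right;
};

/* Top-down splay, structured like vm_page_splay(). */
static struct node *
splay(unsigned long pindex, struct node *root)
{
	struct node dummy, *ltreemax, *rtreemin, *y;

	if (root == NULL)
		return (root);
	dummy.left = dummy.right = NULL;
	ltreemax = rtreemin = &dummy;
	for (;; root = y) {
		if (pindex < root->pindex) {
			if ((y = root->left) == NULL)
				break;
			if (pindex < y->pindex) {	/* rotate right */
				root->left = y->right;
				y->right = root;
				root = y;
				if ((y = root->left) == NULL)
					break;
			}
			rtreemin->left = root;		/* link right */
			rtreemin = root;
		} else if (pindex > root->pindex) {
			if ((y = root->right) == NULL)
				break;
			if (pindex > y->pindex) {	/* rotate left */
				root->right = y->left;
				y->left = root;
				root = y;
				if ((y = root->right) == NULL)
					break;
			}
			ltreemax->right = root;		/* link left */
			ltreemax = root;
		} else
			break;
	}
	/* Assemble: left tree, right tree, new root. */
	ltreemax->right = root->left;
	rtreemin->left = root->right;
	root->left = dummy.right;
	root->right = dummy.left;
	return (root);
}

static struct node *
insert(unsigned long pindex, struct node *root)
{
	struct node *n = calloc(1, sizeof(*n));

	if (n == NULL)
		abort();
	n->pindex = pindex;
	if (root == NULL)
		return (n);
	root = splay(pindex, root);
	if (pindex < root->pindex) {
		n->left = root->left;
		n->right = root;
		root->left = NULL;
	} else {
		n->right = root->right;
		n->left = root;
		root->right = NULL;
	}
	return (n);
}

int
main(void)
{
	struct node *root = NULL;
	unsigned long keys[] = { 5, 1, 9, 3, 7 };

	for (int i = 0; i < 5; i++)
		root = insert(keys[i], root);
	root = splay(3, root);
	printf("root after splay(3): %lu\n", root->pindex);	/* 3 */
	return (0);
}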
*/ if (m != object->root) vm_page_splay(m->pindex, object->root); if (m->left == NULL) root = m->right; else { root = vm_page_splay(m->pindex, m->left); root->right = m->right; } object->root = root; TAILQ_REMOVE(&object->memq, m, listq); /* * And show that the object has one fewer resident page. */ object->resident_page_count--; object->generation++; /* * The vnode may now be recycled. */ if (object->resident_page_count == 0 && object->type == OBJT_VNODE) vdrop((struct vnode *)object->handle); m->object = NULL; } /* * vm_page_lookup: * * Returns the page associated with the object/offset * pair specified; if none is found, NULL is returned. * * The object must be locked. * This routine may not block. * This is a critical path routine */ vm_page_t vm_page_lookup(vm_object_t object, vm_pindex_t pindex) { vm_page_t m; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); if ((m = object->root) != NULL && m->pindex != pindex) { m = vm_page_splay(pindex, m); if ((object->root = m)->pindex != pindex) m = NULL; } return (m); } /* * vm_page_rename: * * Move the given memory entry from its * current object to the specified target object/offset. * * The object must be locked. * This routine may not block. * * Note: swap associated with the page must be invalidated by the move. We * have to do this for several reasons: (1) we aren't freeing the * page, (2) we are dirtying the page, (3) the VM system is probably * moving the page from object A to B, and will then later move * the backing store from A to B and we can't have a conflict. * * Note: we *always* dirty the page. It is necessary both for the * fact that we moved it, and because we may be invalidating * swap. If the page is on the cache, we have to deactivate it * or vm_page_dirty() will panic. Dirty pages are not allowed * on the cache. */ void vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex) { vm_page_remove(m); vm_page_insert(m, new_object, new_pindex); vm_page_dirty(m); } /* * Convert all of the given object's cached pages that have a * pindex within the given range into free pages. If the value * zero is given for "end", then the range's upper bound is * infinity. If the given object is backed by a vnode and it * transitions from having one or more cached pages to none, the * vnode's hold count is reduced. */ void vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end) { vm_page_t m, m_next; boolean_t empty; mtx_lock(&vm_page_queue_free_mtx); if (__predict_false(object->cache == NULL)) { mtx_unlock(&vm_page_queue_free_mtx); return; } m = object->cache = vm_page_splay(start, object->cache); if (m->pindex < start) { if (m->right == NULL) m = NULL; else { m_next = vm_page_splay(start, m->right); m_next->left = m; m->right = NULL; m = object->cache = m_next; } } /* * At this point, "m" is either (1) a reference to the page * with the least pindex that is greater than or equal to * "start" or (2) NULL. */ for (; m != NULL && (m->pindex < end || end == 0); m = m_next) { /* * Find "m"'s successor and remove "m" from the * object's cache. */ if (m->right == NULL) { object->cache = m->left; m_next = NULL; } else { m_next = vm_page_splay(start, m->right); m_next->left = m->left; object->cache = m_next; } /* Convert "m" to a free page. */ m->object = NULL; m->valid = 0; /* Clear PG_CACHED and set PG_FREE. 
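Because a page on this path is known to have PG_CACHED set and PG_FREE clear, the statement that follows flips it between the two mutually exclusive states with a single XOR, and the KASSERT checks the resulting invariant. The trick in isolation, with the bit values invented for the example:

#include <assert.h>
#include <stdio.h>

#define PG_CACHED	0x0004	/* example values, not the kernel's */
#define PG_FREE		0x0008

int
main(void)
{
	unsigned int flags = PG_CACHED;	/* cached, not free */

	flags ^= PG_CACHED | PG_FREE;	/* one op: clear CACHED, set FREE */
	assert((flags & (PG_CACHED | PG_FREE)) == PG_FREE);
	printf("flags=0x%x\n", flags);	/* 0x8 */
	return (0);
}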
*/ m->flags ^= PG_CACHED | PG_FREE; KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE, ("vm_page_cache_free: page %p has inconsistent flags", m)); cnt.v_cache_count--; cnt.v_free_count++; } empty = object->cache == NULL; mtx_unlock(&vm_page_queue_free_mtx); if (object->type == OBJT_VNODE && empty) vdrop(object->handle); } /* * Returns the cached page that is associated with the given * object and offset. If, however, none exists, returns NULL. * * The free page queue must be locked. */ static inline vm_page_t vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex) { vm_page_t m; mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); if ((m = object->cache) != NULL && m->pindex != pindex) { m = vm_page_splay(pindex, m); if ((object->cache = m)->pindex != pindex) m = NULL; } return (m); } /* * Remove the given cached page from its containing object's * collection of cached pages. * * The free page queue must be locked. */ void vm_page_cache_remove(vm_page_t m) { vm_object_t object; vm_page_t root; mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); KASSERT((m->flags & PG_CACHED) != 0, ("vm_page_cache_remove: page %p is not cached", m)); object = m->object; if (m != object->cache) { root = vm_page_splay(m->pindex, object->cache); KASSERT(root == m, ("vm_page_cache_remove: page %p is not cached in object %p", m, object)); } if (m->left == NULL) root = m->right; else if (m->right == NULL) root = m->left; else { root = vm_page_splay(m->pindex, m->left); root->right = m->right; } object->cache = root; m->object = NULL; cnt.v_cache_count--; } /* * Transfer all of the cached pages with offset greater than or * equal to 'offidxstart' from the original object's cache to the * new object's cache. However, any cached pages with offset * greater than or equal to the new object's size are kept in the * original object. Initially, the new object's cache must be * empty. Offset 'offidxstart' in the original object must * correspond to offset zero in the new object. * * The new object must be locked. */ void vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart, vm_object_t new_object) { vm_page_t m, m_next; /* * Insertion into an object's collection of cached pages * requires the object to be locked. In contrast, removal does * not. */ VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED); KASSERT(new_object->cache == NULL, ("vm_page_cache_transfer: object %p has cached pages", new_object)); mtx_lock(&vm_page_queue_free_mtx); if ((m = orig_object->cache) != NULL) { /* * Transfer all of the pages with offset greater than or * equal to 'offidxstart' from the original object's * cache to the new object's cache. */ m = vm_page_splay(offidxstart, m); if (m->pindex < offidxstart) { orig_object->cache = m; new_object->cache = m->right; m->right = NULL; } else { orig_object->cache = m->left; new_object->cache = m; m->left = NULL; } while ((m = new_object->cache) != NULL) { if ((m->pindex - offidxstart) >= new_object->size) { /* * Return all of the cached pages with * offset greater than or equal to the * new object's size to the original * object's cache. */ new_object->cache = m->left; m->left = orig_object->cache; orig_object->cache = m; break; } m_next = vm_page_splay(m->pindex, m->right); /* Update the page's object and offset. 
*/ m->object = new_object; m->pindex -= offidxstart; if (m_next == NULL) break; m->right = NULL; m_next->left = m; new_object->cache = m_next; } KASSERT(new_object->cache == NULL || new_object->type == OBJT_SWAP, ("vm_page_cache_transfer: object %p's type is incompatible" " with cached pages", new_object)); } mtx_unlock(&vm_page_queue_free_mtx); } /* * vm_page_alloc: * * Allocate and return a memory cell associated * with this VM object/offset pair. * * page_req classes: * VM_ALLOC_NORMAL normal process request * VM_ALLOC_SYSTEM system *really* needs a page * VM_ALLOC_INTERRUPT interrupt time request * VM_ALLOC_ZERO zero page * * This routine may not block. */ vm_page_t vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) { struct vnode *vp = NULL; vm_object_t m_object; vm_page_t m; int flags, page_req; page_req = req & VM_ALLOC_CLASS_MASK; KASSERT(curthread->td_intr_nesting_level == 0 || page_req == VM_ALLOC_INTERRUPT, ("vm_page_alloc(NORMAL|SYSTEM) in interrupt context")); if ((req & VM_ALLOC_NOOBJ) == 0) { KASSERT(object != NULL, ("vm_page_alloc: NULL object.")); VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); } /* * The pager is allowed to eat deeper into the free page list. */ if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) { page_req = VM_ALLOC_SYSTEM; }; mtx_lock(&vm_page_queue_free_mtx); if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved || (page_req == VM_ALLOC_SYSTEM && cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) || (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count + cnt.v_cache_count > 0)) { /* * Allocate from the free queue if the number of free pages * exceeds the minimum for the request class. */ if (object != NULL && (m = vm_page_cache_lookup(object, pindex)) != NULL) { if ((req & VM_ALLOC_IFNOTCACHED) != 0) { mtx_unlock(&vm_page_queue_free_mtx); return (NULL); } if (vm_phys_unfree_page(m)) vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0); +#if VM_NRESERVLEVEL > 0 + else if (!vm_reserv_reactivate_page(m)) +#else else +#endif panic("vm_page_alloc: cache page %p is missing" " from the free queue", m); } else if ((req & VM_ALLOC_IFCACHED) != 0) { mtx_unlock(&vm_page_queue_free_mtx); return (NULL); - } else +#if VM_NRESERVLEVEL > 0 + } else if (object == NULL || + (object->flags & OBJ_COLORED) == 0 || + (m = vm_reserv_alloc_page(object, pindex)) == NULL) { +#else + } else { +#endif m = vm_phys_alloc_pages(object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0); +#if VM_NRESERVLEVEL > 0 + if (m == NULL && vm_reserv_reclaim()) { + m = vm_phys_alloc_pages(object != NULL ? + VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, + 0); + } +#endif + } } else { /* * Not allocatable, give up. */ mtx_unlock(&vm_page_queue_free_mtx); atomic_add_int(&vm_pageout_deficit, 1); pagedaemon_wakeup(); return (NULL); } /* * At this point we had better have found a good page. */ KASSERT( m != NULL, ("vm_page_alloc(): missing page on free queue") ); if ((m->flags & PG_CACHED) != 0) { KASSERT(m->valid != 0, ("vm_page_alloc: cached page %p is invalid", m)); if (m->object == object && m->pindex == pindex) cnt.v_reactivated++; else m->valid = 0; m_object = m->object; vm_page_cache_remove(m); if (m_object->type == OBJT_VNODE && m_object->cache == NULL) vp = m_object->handle; } else { KASSERT(VM_PAGE_IS_FREE(m), ("vm_page_alloc: page %p is not free", m)); KASSERT(m->valid == 0, ("vm_page_alloc: free page %p is valid", m)); cnt.v_free_count--; } /* * Initialize structure. Only the PG_ZERO flag is inherited. 
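
/*
 * [Editor's illustrative sketch, not part of the commit.]  Models the
 * three-tier admission check at the top of vm_page_alloc() above: a NORMAL
 * request must leave v_free_reserved pages untouched, a SYSTEM request may
 * dig down to v_interrupt_free_min, and an INTERRUPT request may take the
 * last page.  The threshold values in main() are made up; the class names
 * stand in for the VM_ALLOC_* request classes.
 */
#include <stdio.h>

#define ALLOC_NORMAL	0
#define ALLOC_SYSTEM	1
#define ALLOC_INTERRUPT	2

static int
may_allocate(int class, int avail, int free_reserved, int interrupt_free_min)
{
	/* "avail" plays the role of cnt.v_free_count + cnt.v_cache_count. */
	return (avail > free_reserved ||
	    (class == ALLOC_SYSTEM && avail > interrupt_free_min) ||
	    (class == ALLOC_INTERRUPT && avail > 0));
}

int
main(void)
{
	/* With 128 pages reserved and a 32-page interrupt floor: */
	printf("%d\n", may_allocate(ALLOC_NORMAL, 100, 128, 32));	/* 0 */
	printf("%d\n", may_allocate(ALLOC_SYSTEM, 100, 128, 32));	/* 1 */
	printf("%d\n", may_allocate(ALLOC_INTERRUPT, 10, 128, 32));	/* 1 */
	return (0);
}
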
*/ flags = 0; if (m->flags & PG_ZERO) { vm_page_zero_count--; if (req & VM_ALLOC_ZERO) flags = PG_ZERO; } if (object == NULL || object->type == OBJT_PHYS) flags |= PG_UNMANAGED; m->flags = flags; if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) m->oflags = 0; else m->oflags = VPO_BUSY; if (req & VM_ALLOC_WIRED) { atomic_add_int(&cnt.v_wire_count, 1); m->wire_count = 1; } else m->wire_count = 0; m->hold_count = 0; m->act_count = 0; m->busy = 0; KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m)); mtx_unlock(&vm_page_queue_free_mtx); if ((req & VM_ALLOC_NOOBJ) == 0) vm_page_insert(m, object, pindex); else m->pindex = pindex; /* * The following call to vdrop() must come after the above call * to vm_page_insert() in case both affect the same object and * vnode. Otherwise, the affected vnode's hold count could * temporarily become zero. */ if (vp != NULL) vdrop(vp); /* * Don't wakeup too often - wakeup the pageout daemon when * we would be nearly out of memory. */ if (vm_paging_needed()) pagedaemon_wakeup(); return (m); } /* * vm_wait: (also see VM_WAIT macro) * * Block until free pages are available for allocation * - Called in various places before memory allocations. */ void vm_wait(void) { mtx_lock(&vm_page_queue_free_mtx); if (curproc == pageproc) { vm_pageout_pages_needed = 1; msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx, PDROP | PSWP, "VMWait", 0); } else { if (!vm_pages_needed) { vm_pages_needed = 1; wakeup(&vm_pages_needed); } msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM, "vmwait", 0); } } /* * vm_waitpfault: (also see VM_WAITPFAULT macro) * * Block until free pages are available for allocation * - Called only in vm_fault so that processes page faulting * can be easily tracked. * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing * processes will be able to grab memory first. Do not change * this balance without careful testing first. */ void vm_waitpfault(void) { mtx_lock(&vm_page_queue_free_mtx); if (!vm_pages_needed) { vm_pages_needed = 1; wakeup(&vm_pages_needed); } msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER, "pfault", 0); } /* * vm_page_activate: * * Put the specified page on the active list (if appropriate). * Ensure that act_count is at least ACT_INIT but do not otherwise * mess with it. * * The page queues must be locked. * This routine may not block. */ void vm_page_activate(vm_page_t m) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) { vm_pageq_remove(m); if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) { if (m->act_count < ACT_INIT) m->act_count = ACT_INIT; vm_pageq_enqueue(PQ_ACTIVE, m); } } else { if (m->act_count < ACT_INIT) m->act_count = ACT_INIT; } } /* * vm_page_free_wakeup: * * Helper routine for vm_page_free_toq() and vm_page_cache(). This * routine is called when a page has been added to the cache or free * queues. * * The page queues must be locked. * This routine may not block. */ static inline void vm_page_free_wakeup(void) { mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); /* * if pageout daemon needs pages, then tell it that there are * some free. */ if (vm_pageout_pages_needed && cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) { wakeup(&vm_pageout_pages_needed); vm_pageout_pages_needed = 0; } /* * wakeup processes that are waiting on memory if we hit a * high water mark. And wakeup scheduler process if we have * lots of memory. this process will swapin processes. 
*/ if (vm_pages_needed && !vm_page_count_min()) { vm_pages_needed = 0; wakeup(&cnt.v_free_count); } } /* * vm_page_free_toq: * * Returns the given page to the free list, * disassociating it from any VM object. * * Object and page must be locked prior to entry. * This routine may not block. */ void vm_page_free_toq(vm_page_t m) { if (VM_PAGE_GETQUEUE(m) != PQ_NONE) mtx_assert(&vm_page_queue_mtx, MA_OWNED); KASSERT(!pmap_page_is_mapped(m), ("vm_page_free_toq: freeing mapped page %p", m)); PCPU_INC(cnt.v_tfree); if (m->busy || VM_PAGE_IS_FREE(m)) { printf( "vm_page_free: pindex(%lu), busy(%d), VPO_BUSY(%d), hold(%d)\n", (u_long)m->pindex, m->busy, (m->oflags & VPO_BUSY) ? 1 : 0, m->hold_count); if (VM_PAGE_IS_FREE(m)) panic("vm_page_free: freeing free page"); else panic("vm_page_free: freeing busy page"); } /* * unqueue, then remove page. Note that we cannot destroy * the page here because we do not want to call the pager's * callback routine until after we've put the page on the * appropriate free queue. */ vm_pageq_remove(m); vm_page_remove(m); /* * If fictitious, remove object association and * return; otherwise, delay object association removal. */ if ((m->flags & PG_FICTITIOUS) != 0) { return; } m->valid = 0; vm_page_undirty(m); if (m->wire_count != 0) { if (m->wire_count > 1) { panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx", m->wire_count, (long)m->pindex); } panic("vm_page_free: freeing wired page"); } if (m->hold_count != 0) { m->flags &= ~PG_ZERO; vm_pageq_enqueue(PQ_HOLD, m); } else { m->flags |= PG_FREE; mtx_lock(&vm_page_queue_free_mtx); cnt.v_free_count++; - vm_phys_free_pages(m, 0); +#if VM_NRESERVLEVEL > 0 + if (!vm_reserv_free_page(m)) +#else + if (TRUE) +#endif + vm_phys_free_pages(m, 0); if ((m->flags & PG_ZERO) != 0) ++vm_page_zero_count; else vm_page_zero_idle_wakeup(); vm_page_free_wakeup(); mtx_unlock(&vm_page_queue_free_mtx); } } /* * vm_page_wire: * * Mark this page as wired down by yet * another map, removing it from paging queues * as necessary. * * The page queues must be locked. * This routine may not block. */ void vm_page_wire(vm_page_t m) { /* * Only bump the wire statistics if the page is not already wired, * and only unqueue the page if it is on some queue (if it is unmanaged * it is already off the queues). */ mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (m->flags & PG_FICTITIOUS) return; if (m->wire_count == 0) { if ((m->flags & PG_UNMANAGED) == 0) vm_pageq_remove(m); atomic_add_int(&cnt.v_wire_count, 1); } m->wire_count++; KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m)); } /* * vm_page_unwire: * * Release one wiring of this page, potentially * enabling it to be paged again. * * Many pages placed on the inactive queue should actually go * into the cache, but it is difficult to figure out which. What * we do instead, if the inactive target is well met, is to put * clean pages at the head of the inactive queue instead of the tail. * This will cause them to be moved to the cache more quickly and * if not actively re-referenced, freed more quickly. If we just * stick these pages at the end of the inactive queue, heavy filesystem * meta-data accesses can cause an unnecessary paging load on memory-bound * processes. This optimization causes one-time-use metadata to be * reused more quickly. * * BUT, if we are in a low-memory situation we have no choice but to * put clean pages on the cache queue.
* * A number of routines use vm_page_unwire() to guarantee that the page * will go into either the inactive or active queues, and will NEVER * be placed in the cache - for example, just after dirtying a page. * Dirty pages in the cache are not allowed. * * The page queues must be locked. * This routine may not block. */ void vm_page_unwire(vm_page_t m, int activate) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (m->flags & PG_FICTITIOUS) return; if (m->wire_count > 0) { m->wire_count--; if (m->wire_count == 0) { atomic_subtract_int(&cnt.v_wire_count, 1); if (m->flags & PG_UNMANAGED) { ; } else if (activate) vm_pageq_enqueue(PQ_ACTIVE, m); else { vm_page_flag_clear(m, PG_WINATCFLS); vm_pageq_enqueue(PQ_INACTIVE, m); } } } else { panic("vm_page_unwire: invalid wire count: %d", m->wire_count); } } /* * Move the specified page to the inactive queue. If the page has * any associated swap, the swap is deallocated. * * Normally athead is 0 resulting in LRU operation. athead is set * to 1 if we want this page to be 'as if it were placed in the cache', * except without unmapping it from the process address space. * * This routine may not block. */ static inline void _vm_page_deactivate(vm_page_t m, int athead) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); /* * Ignore if already inactive. */ if (VM_PAGE_INQUEUE2(m, PQ_INACTIVE)) return; if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) { vm_page_flag_clear(m, PG_WINATCFLS); vm_pageq_remove(m); if (athead) TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq); else TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq); VM_PAGE_SETQUEUE2(m, PQ_INACTIVE); cnt.v_inactive_count++; } } void vm_page_deactivate(vm_page_t m) { _vm_page_deactivate(m, 0); } /* * vm_page_try_to_cache: * * Returns 0 on failure, 1 on success */ int vm_page_try_to_cache(vm_page_t m) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if (m->dirty || m->hold_count || m->busy || m->wire_count || (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) { return (0); } pmap_remove_all(m); if (m->dirty) return (0); vm_page_cache(m); return (1); } /* * vm_page_try_to_free() * * Attempt to free the page. If we cannot free it, we do nothing. * 1 is returned on success, 0 on failure. */ int vm_page_try_to_free(vm_page_t m) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (m->object != NULL) VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if (m->dirty || m->hold_count || m->busy || m->wire_count || (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) { return (0); } pmap_remove_all(m); if (m->dirty) return (0); vm_page_free(m); return (1); } /* * vm_page_cache * * Put the specified page onto the page cache queue (if appropriate). * * This routine may not block. */ void vm_page_cache(vm_page_t m) { vm_object_t object; vm_page_t root; mtx_assert(&vm_page_queue_mtx, MA_OWNED); object = m->object; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); if ((m->flags & PG_UNMANAGED) || (m->oflags & VPO_BUSY) || m->busy || m->hold_count || m->wire_count) { panic("vm_page_cache: attempting to cache busy page"); } pmap_remove_all(m); if (m->dirty != 0) panic("vm_page_cache: page %p is dirty", m); if (m->valid == 0 || object->type == OBJT_DEFAULT || (object->type == OBJT_SWAP && !vm_pager_has_page(object, m->pindex, NULL, NULL))) { /* * Hypothesis: A cache-eligible page belonging to a * default object or swap object but without a backing * store must be zero filled.
*/ vm_page_free(m); return; } KASSERT((m->flags & PG_CACHED) == 0, ("vm_page_cache: page %p is already cached", m)); cnt.v_tcached++; /* * Remove the page from the paging queues. */ vm_pageq_remove(m); /* * Remove the page from the object's collection of resident * pages. */ if (m != object->root) vm_page_splay(m->pindex, object->root); if (m->left == NULL) root = m->right; else { root = vm_page_splay(m->pindex, m->left); root->right = m->right; } object->root = root; TAILQ_REMOVE(&object->memq, m, listq); object->resident_page_count--; object->generation++; /* * Insert the page into the object's collection of cached pages * and the physical memory allocator's cache/free page queues. */ vm_page_flag_set(m, PG_CACHED); vm_page_flag_clear(m, PG_ZERO); mtx_lock(&vm_page_queue_free_mtx); - vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0); cnt.v_cache_count++; root = object->cache; if (root == NULL) { m->left = NULL; m->right = NULL; } else { root = vm_page_splay(m->pindex, root); if (m->pindex < root->pindex) { m->left = root->left; m->right = root; root->left = NULL; } else if (__predict_false(m->pindex == root->pindex)) panic("vm_page_cache: offset already cached"); else { m->right = root->right; m->left = root; root->right = NULL; } } object->cache = m; - vm_phys_free_pages(m, 0); +#if VM_NRESERVLEVEL > 0 + if (!vm_reserv_free_page(m)) { +#else + if (TRUE) { +#endif + vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0); + vm_phys_free_pages(m, 0); + } vm_page_free_wakeup(); mtx_unlock(&vm_page_queue_free_mtx); /* * Increment the vnode's hold count if this is the object's only * cached page. Decrement the vnode's hold count if this was * the object's only resident page. */ if (object->type == OBJT_VNODE) { if (root == NULL && object->resident_page_count != 0) vhold(object->handle); else if (root != NULL && object->resident_page_count == 0) vdrop(object->handle); } } /* * vm_page_dontneed * * Cache, deactivate, or do nothing as appropriate. This routine * is typically used by madvise() MADV_DONTNEED. * * Generally speaking we want to move the page into the cache so * it gets reused quickly. However, this can result in a silly syndrome * due to the page recycling too quickly. Small objects will not be * fully cached. On the otherhand, if we move the page to the inactive * queue we wind up with a problem whereby very large objects * unnecessarily blow away our inactive and cache queues. * * The solution is to move the pages based on a fixed weighting. We * either leave them alone, deactivate them, or move them to the cache, * where moving them to the cache has the highest weighting. * By forcing some pages into other queues we eventually force the * system to balance the queues, potentially recovering other unrelated * space from active. The idea is to not force this to happen too * often. */ void vm_page_dontneed(vm_page_t m) { static int dnweight; int dnw; int head; mtx_assert(&vm_page_queue_mtx, MA_OWNED); dnw = ++dnweight; /* * occassionally leave the page alone */ if ((dnw & 0x01F0) == 0 || VM_PAGE_INQUEUE2(m, PQ_INACTIVE)) { if (m->act_count >= ACT_INIT) --m->act_count; return; } if (m->dirty == 0 && pmap_is_modified(m)) vm_page_dirty(m); if (m->dirty || (dnw & 0x0070) == 0) { /* * Deactivate the page 3 times out of 32. */ head = 0; } else { /* * Cache the page 28 times out of every 32. Note that * the page is deactivated instead of cached, but placed * at the head of the queue instead of the tail. 
*/ head = 1; } _vm_page_deactivate(m, head); } /* * Grab a page, waiting until we are woken up due to the page * changing state. We keep on waiting if the page continues * to be in the object. If the page doesn't exist, first allocate it * and then conditionally zero it. * * This routine may block. */ vm_page_t vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags) { vm_page_t m; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); retrylookup: if ((m = vm_page_lookup(object, pindex)) != NULL) { if (vm_page_sleep_if_busy(m, TRUE, "pgrbwt")) { if ((allocflags & VM_ALLOC_RETRY) == 0) return (NULL); goto retrylookup; } else { if ((allocflags & VM_ALLOC_WIRED) != 0) { vm_page_lock_queues(); vm_page_wire(m); vm_page_unlock_queues(); } if ((allocflags & VM_ALLOC_NOBUSY) == 0) vm_page_busy(m); return (m); } } m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY); if (m == NULL) { VM_OBJECT_UNLOCK(object); VM_WAIT; VM_OBJECT_LOCK(object); if ((allocflags & VM_ALLOC_RETRY) == 0) return (NULL); goto retrylookup; } else if (m->valid != 0) return (m); if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0) pmap_zero_page(m); return (m); } /* * Mapping function for valid bits or for dirty bits in * a page. May not block. * * Inputs are required to range within a page. */ int vm_page_bits(int base, int size) { int first_bit; int last_bit; KASSERT( base + size <= PAGE_SIZE, ("vm_page_bits: illegal base/size %d/%d", base, size) ); if (size == 0) /* handle degenerate case */ return (0); first_bit = base >> DEV_BSHIFT; last_bit = (base + size - 1) >> DEV_BSHIFT; return ((2 << last_bit) - (1 << first_bit)); } /* * vm_page_set_validclean: * * Sets portions of a page valid and clean. The arguments are expected * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive * of any partial chunks touched by the range. The invalid portion of * such chunks will be zero'd. * * This routine may not block. * * (base + size) must be less than or equal to PAGE_SIZE. */ void vm_page_set_validclean(vm_page_t m, int base, int size) { int pagebits; int frag; int endoff; mtx_assert(&vm_page_queue_mtx, MA_OWNED); VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if (size == 0) /* handle degenerate case */ return; /* * If the base is not DEV_BSIZE aligned and the valid * bit is clear, we have to zero out a portion of the * first block. */ if ((frag = base & ~(DEV_BSIZE - 1)) != base && (m->valid & (1 << (base >> DEV_BSHIFT))) == 0) pmap_zero_page_area(m, frag, base - frag); /* * If the ending offset is not DEV_BSIZE aligned and the * valid bit is clear, we have to zero out a portion of * the last block. */ endoff = base + size; if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0) pmap_zero_page_area(m, endoff, DEV_BSIZE - (endoff & (DEV_BSIZE - 1))); /* * Set valid, clear dirty bits. If validating the entire * page we can safely clear the pmap modify bit. We also * use this opportunity to clear the VPO_NOSYNC flag. If a process * takes a write fault on a MAP_NOSYNC memory area the flag will * be set again. * * We set valid bits inclusive of any overlap, but we can only * clear dirty bits for DEV_BSIZE chunks that are fully within * the range.
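
/*
 * [Editor's illustrative sketch, not part of the commit.]  vm_page_bits()
 * above builds a bitmask with one bit per DEV_BSIZE block touched by
 * [base, base + size): ((2 << last_bit) - (1 << first_bit)) sets every bit
 * from first_bit through last_bit inclusive.  Userspace copy with the
 * common 4KB page / 512-byte block values, which are illustrative here.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096
#define DEV_BSIZE	512
#define DEV_BSHIFT	9	/* log2(DEV_BSIZE) */

static int
page_bits(int base, int size)
{
	int first_bit, last_bit;

	assert(base + size <= PAGE_SIZE);
	if (size == 0)
		return (0);
	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;
	return ((2 << last_bit) - (1 << first_bit));
}

int
main(void)
{
	/* A full page covers all eight 512-byte blocks: 0xff. */
	printf("0x%02x\n", page_bits(0, PAGE_SIZE));
	/* Bytes [512, 1536) touch blocks 1 and 2: 0x06. */
	printf("0x%02x\n", page_bits(512, 1024));
	/* A 1-byte sliver still marks its whole block: 0x01. */
	printf("0x%02x\n", page_bits(0, 1));
	return (0);
}
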
*/ pagebits = vm_page_bits(base, size); m->valid |= pagebits; #if 0 /* NOT YET */ if ((frag = base & (DEV_BSIZE - 1)) != 0) { frag = DEV_BSIZE - frag; base += frag; size -= frag; if (size < 0) size = 0; } pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1)); #endif m->dirty &= ~pagebits; if (base == 0 && size == PAGE_SIZE) { pmap_clear_modify(m); m->oflags &= ~VPO_NOSYNC; } } void vm_page_clear_dirty(vm_page_t m, int base, int size) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); m->dirty &= ~vm_page_bits(base, size); } /* * vm_page_set_invalid: * * Invalidates DEV_BSIZE'd chunks within a page. Both the * valid and dirty bits for the affected areas are cleared. * * May not block. */ void vm_page_set_invalid(vm_page_t m, int base, int size) { int bits; VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); bits = vm_page_bits(base, size); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (m->valid == VM_PAGE_BITS_ALL && bits != 0) pmap_remove_all(m); m->valid &= ~bits; m->dirty &= ~bits; m->object->generation++; } /* * vm_page_zero_invalid() * * The kernel assumes that the invalid portions of a page contain * garbage, but such pages can be mapped into memory by user code. * When this occurs, we must zero out the non-valid portions of the * page so user code sees what it expects. * * Pages are most often semi-valid when the end of a file is mapped * into memory and the file's size is not page aligned. */ void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) { int b; int i; VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); /* * Scan the valid bits looking for invalid sections that * must be zeroed. Invalid sub-DEV_BSIZE'd areas (where the * valid bit may be set) have already been zeroed by * vm_page_set_validclean(). */ for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { if (i == (PAGE_SIZE / DEV_BSIZE) || (m->valid & (1 << i)) ) { if (i > b) { pmap_zero_page_area(m, b << DEV_BSHIFT, (i - b) << DEV_BSHIFT); } b = i + 1; } } /* * setvalid is TRUE when we can safely set the zero'd areas * as being valid. We can do this if there are no cache consistency * issues. e.g. it is ok to do with UFS, but not ok to do with NFS. */ if (setvalid) m->valid = VM_PAGE_BITS_ALL; } /* * vm_page_is_valid: * * Is (partial) page valid? Note that the case where size == 0 * will return FALSE in the degenerate case where the page is * entirely invalid, and TRUE otherwise. * * May not block. */ int vm_page_is_valid(vm_page_t m, int base, int size) { int bits = vm_page_bits(base, size); VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if (m->valid && ((m->valid & bits) == bits)) return 1; else return 0; } /* * update dirty bits from pmap/mmu. May not block. */ void vm_page_test_dirty(vm_page_t m) { if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) { vm_page_dirty(m); } } int so_zerocp_fullpage = 0; /* * Replace the given page with a copy. The copied page assumes * the portion of the given page's "wire_count" that is not the * responsibility of this copy-on-write mechanism. * * The object containing the given page must have a non-zero * paging-in-progress count and be locked.
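
/*
 * [Editor's illustrative sketch, not part of the commit.]  Userspace model
 * of the scan loop in vm_page_zero_invalid() above: walk one bit per
 * DEV_BSIZE block, and whenever a valid block (or the end of the page) is
 * reached, the run [b, i) of invalid blocks behind it is the span that
 * would be handed to pmap_zero_page_area().  The 8-block page below is the
 * illustrative 4KB/512 case.
 */
#include <stdio.h>

#define BLOCKS_PER_PAGE	8	/* PAGE_SIZE / DEV_BSIZE, illustrative */

/* Report the maximal runs of invalid blocks that would be zeroed. */
static void
scan_invalid(int valid)
{
	int b, i;

	for (b = i = 0; i <= BLOCKS_PER_PAGE; ++i) {
		if (i == BLOCKS_PER_PAGE || (valid & (1 << i))) {
			if (i > b)
				printf("zero blocks [%d, %d)\n", b, i);
			b = i + 1;
		}
	}
}

int
main(void)
{
	/* Blocks 0-2 and 6 valid, 3-5 and 7 invalid. */
	scan_invalid(0x47);	/* prints [3, 6) and [7, 8) */
	return (0);
}
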
*/ void vm_page_cowfault(vm_page_t m) { vm_page_t mnew; vm_object_t object; vm_pindex_t pindex; object = m->object; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->paging_in_progress != 0, ("vm_page_cowfault: object %p's paging-in-progress count is zero.", object)); pindex = m->pindex; retry_alloc: pmap_remove_all(m); vm_page_remove(m); mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY); if (mnew == NULL) { vm_page_insert(m, object, pindex); vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); VM_WAIT; VM_OBJECT_LOCK(object); if (m == vm_page_lookup(object, pindex)) { vm_page_lock_queues(); goto retry_alloc; } else { /* * Page disappeared during the wait. */ vm_page_lock_queues(); return; } } if (m->cow == 0) { /* * check to see if we raced with an xmit complete when * waiting to allocate a page. If so, put things back * the way they were. */ vm_page_free(mnew); vm_page_insert(m, object, pindex); } else { /* clear COW & copy page */ if (!so_zerocp_fullpage) pmap_copy_page(m, mnew); mnew->valid = VM_PAGE_BITS_ALL; vm_page_dirty(mnew); mnew->wire_count = m->wire_count - m->cow; m->wire_count = m->cow; } } void vm_page_cowclear(vm_page_t m) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (m->cow) { m->cow--; /* * let vm_fault add back write permission lazily */ } /* * sf_buf_free() will free the page, so we needn't do it here */ } void vm_page_cowsetup(vm_page_t m) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); m->cow++; pmap_remove_write(m); } #include "opt_ddb.h" #ifdef DDB #include <sys/kernel.h> #include <ddb/ddb.h> DB_SHOW_COMMAND(page, vm_page_print_page_info) { db_printf("cnt.v_free_count: %d\n", cnt.v_free_count); db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count); db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count); db_printf("cnt.v_active_count: %d\n", cnt.v_active_count); db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count); db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved); db_printf("cnt.v_free_min: %d\n", cnt.v_free_min); db_printf("cnt.v_free_target: %d\n", cnt.v_free_target); db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min); db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target); } DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) { db_printf("PQ_FREE:"); db_printf(" %d", cnt.v_free_count); db_printf("\n"); db_printf("PQ_CACHE:"); db_printf(" %d", cnt.v_cache_count); db_printf("\n"); db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n", *vm_page_queues[PQ_ACTIVE].cnt, *vm_page_queues[PQ_INACTIVE].cnt); } #endif /* DDB */ Index: head/sys/vm/vm_reserv.c =================================================================== --- head/sys/vm/vm_reserv.c (nonexistent) +++ head/sys/vm/vm_reserv.c (revision 174982) @@ -0,0 +1,668 @@ +/*- + * Copyright (c) 2002-2006 Rice University + * Copyright (c) 2007 Alan L. Cox + * All rights reserved. + * + * This software was developed for the FreeBSD Project by Alan L. Cox, + * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY + * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Superpage reservation management module + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include "opt_vm.h" + +#include <sys/param.h> +#include <sys/kernel.h> +#include <sys/lock.h> +#include <sys/malloc.h> +#include <sys/mutex.h> +#include <sys/queue.h> +#include <sys/sbuf.h> +#include <sys/sysctl.h> +#include <sys/systm.h> + +#include <vm/vm.h> +#include <vm/vm_param.h> +#include <vm/vm_object.h> +#include <vm/vm_page.h> +#include <vm/vm_phys.h> +#include <vm/vm_reserv.h> + +/* + * The reservation system supports the speculative allocation of large physical + * pages ("superpages"). Speculative allocation enables the fully-automatic + * utilization of superpages by the virtual memory system. In other words, no + * programmatic directives are required to use superpages. + */ + +#if VM_NRESERVLEVEL > 0 + +/* + * The number of small pages that are contained in a level 0 reservation + */ +#define VM_LEVEL_0_NPAGES (1 << VM_LEVEL_0_ORDER) + +/* + * The number of bits by which a physical address is shifted to obtain the + * reservation number + */ +#define VM_LEVEL_0_SHIFT (VM_LEVEL_0_ORDER + PAGE_SHIFT) + +/* + * The size of a level 0 reservation in bytes + */ +#define VM_LEVEL_0_SIZE (1 << VM_LEVEL_0_SHIFT) + +/* + * Computes the index of the small page underlying the given (object, pindex) + * within the reservation's array of small pages. + */ +#define VM_RESERV_INDEX(object, pindex) \ + (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1)) + +/* + * The reservation structure + * + * A reservation structure is constructed whenever a large physical page is + * speculatively allocated to an object. The reservation provides the small + * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets + * within that object. The reservation's "popcnt" tracks the number of these + * small physical pages that are in use at any given time. When and if the + * reservation is not fully utilized, it appears in the queue of partially- + * populated reservations. The reservation always appears on the containing + * object's list of reservations. + * + * A partially-populated reservation can be broken and reclaimed at any time. + */ +struct vm_reserv { + TAILQ_ENTRY(vm_reserv) partpopq; + LIST_ENTRY(vm_reserv) objq; + vm_object_t object; /* containing object */ + vm_pindex_t pindex; /* offset within object */ + vm_page_t pages; /* first page of a superpage */ + int popcnt; /* # of pages in use */ + char inpartpopq; +}; + +/* + * The reservation array + * + * This array is analogous in function to vm_page_array. It differs in the + * respect that it may contain a greater number of useful reservation + * structures than there are (physical) superpages. These "invalid" + * reservation structures exist to trade off space for time in the + * implementation of vm_reserv_from_page().
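
/*
 * [Editor's illustrative sketch, not part of the commit.]  VM_RESERV_INDEX
 * above maps an (object, pindex) pair to a slot within a reservation by
 * adding the object's color and masking with the reservation size.  The
 * level 0 order below (9, i.e. a 2MB superpage of 4KB pages) is an assumed
 * example value; the real order is machine-dependent.
 */
#include <stdio.h>

#define LEVEL_0_ORDER	9			/* assumed, illustrative */
#define LEVEL_0_NPAGES	(1 << LEVEL_0_ORDER)

#define RESERV_INDEX(pg_color, pindex) \
	(((pg_color) + (pindex)) & (LEVEL_0_NPAGES - 1))

int
main(void)
{
	/* An uncolored object: pindex 513 lands at slot 1 of its superpage. */
	printf("%d\n", RESERV_INDEX(0, 513));
	/* A color of 100 shifts the mapping: pindex 0 lands at slot 100. */
	printf("%d\n", RESERV_INDEX(100, 0));
	return (0);
}
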
Invalid reservation structures are + * distinguishable from "valid" reservation structures by inspecting the + * reservation's "pages" field. Invalid reservation structures have a NULL + * "pages" field. + * + * vm_reserv_from_page() maps a small (physical) page to an element of this + * array by computing a physical reservation number from the page's physical + * address. The physical reservation number is used as the array index. + * + * An "active" reservation is a valid reservation structure that has a non-NULL + * "object" field and a non-zero "popcnt" field. In other words, every active + * reservation belongs to a particular object. Moreover, every active + * reservation has an entry in the containing object's list of reservations. + */ +static vm_reserv_t vm_reserv_array; + +/* + * The partially-populated reservation queue + * + * This queue enables the fast recovery of an unused cached or free small page + * from a partially-populated reservation. The head of this queue is either + * the least-recently-populated or most-recently-depopulated reservation. + * + * Access to this queue is synchronized by the free page queue lock. + */ +static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop = + TAILQ_HEAD_INITIALIZER(vm_rvq_partpop); + +static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info"); + +static long vm_reserv_broken; +SYSCTL_LONG(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD, + &vm_reserv_broken, 0, "Cumulative number of broken reservations"); + +static long vm_reserv_freed; +SYSCTL_LONG(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD, + &vm_reserv_freed, 0, "Cumulative number of freed reservations"); + +static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS); + +SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, + sysctl_vm_reserv_partpopq, "A", "Partially-populated reservation queues"); + +static long vm_reserv_reclaimed; +SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD, + &vm_reserv_reclaimed, 0, "Cumulative number of reclaimed reservations"); + +static void vm_reserv_depopulate(vm_reserv_t rv); +static vm_reserv_t vm_reserv_from_page(vm_page_t m); +static boolean_t vm_reserv_has_pindex(vm_reserv_t rv, + vm_pindex_t pindex); +static void vm_reserv_populate(vm_reserv_t rv); + +/* + * Describes the current state of the partially-populated reservation queue. + */ +static int +sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS) +{ + struct sbuf sbuf; + vm_reserv_t rv; + char *cbuf; + const int cbufsize = (VM_NRESERVLEVEL + 1) * 81; + int counter, error, level, unused_pages; + + cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO); + sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN); + sbuf_printf(&sbuf, "\nLEVEL SIZE NUMBER\n\n"); + for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) { + counter = 0; + unused_pages = 0; + mtx_lock(&vm_page_queue_free_mtx); + TAILQ_FOREACH(rv, &vm_rvq_partpop/*[level]*/, partpopq) { + counter++; + unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt; + } + mtx_unlock(&vm_page_queue_free_mtx); + sbuf_printf(&sbuf, "%5.5d: %6.6dK, %6.6d\n", level, + unused_pages * (PAGE_SIZE / 1024), counter); + } + sbuf_finish(&sbuf); + error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf)); + sbuf_delete(&sbuf); + free(cbuf, M_TEMP); + return (error); +} + +/* + * Reduces the given reservation's population count. If the population count + * becomes zero, the reservation is destroyed. Additionally, moves the + * reservation to the head of the partially-populated reservations queue if the + * population count is non-zero. 
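
/*
 * [Editor's illustrative sketch, not part of the commit.]  Models the queue
 * discipline just described and implemented by vm_reserv_populate() and
 * vm_reserv_depopulate() below: a partially-populated reservation sits on
 * vm_rvq_partpop; populating re-queues it at the tail, depopulating
 * re-queues it at the head, and a reservation that becomes full (or empty)
 * leaves the queue.  Userspace model with a made-up 4-page reservation; in
 * the real code an emptied reservation is also freed.
 */
#include <assert.h>
#include <sys/queue.h>

#define NPAGES	4	/* stands in for VM_LEVEL_0_NPAGES, illustrative */

struct resv {
	TAILQ_ENTRY(resv) partpopq;
	int popcnt;
	int inq;
};
static TAILQ_HEAD(, resv) partpop = TAILQ_HEAD_INITIALIZER(partpop);

static void
populate(struct resv *rv)
{
	if (rv->inq) {
		TAILQ_REMOVE(&partpop, rv, partpopq);
		rv->inq = 0;
	}
	rv->popcnt++;
	if (rv->popcnt < NPAGES) {	/* still partial: go to the tail */
		rv->inq = 1;
		TAILQ_INSERT_TAIL(&partpop, rv, partpopq);
	}
}

static void
depopulate(struct resv *rv)
{
	if (rv->inq) {
		TAILQ_REMOVE(&partpop, rv, partpopq);
		rv->inq = 0;
	}
	rv->popcnt--;
	if (rv->popcnt > 0) {		/* still partial: go to the head */
		rv->inq = 1;
		TAILQ_INSERT_HEAD(&partpop, rv, partpopq);
	}
}

int
main(void)
{
	struct resv a = { .popcnt = 0 }, b = { .popcnt = 0 };

	populate(&a); populate(&a);	/* a: 2/4, queue: a */
	populate(&b);			/* b: 1/4, queue: a, b */
	depopulate(&a);			/* a moves to the head */
	assert(TAILQ_FIRST(&partpop) == &a);
	while (b.popcnt < NPAGES)
		populate(&b);		/* a full b leaves the queue */
	assert(!b.inq && TAILQ_FIRST(&partpop) == &a);
	return (0);
}
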
+ * + * The free page queue lock must be held. + */ +static void +vm_reserv_depopulate(vm_reserv_t rv) +{ + + mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); + KASSERT(rv->object != NULL, + ("vm_reserv_depopulate: reserv %p is free", rv)); + KASSERT(rv->popcnt > 0, + ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv)); + if (rv->inpartpopq) { + TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq); + rv->inpartpopq = FALSE; + } + rv->popcnt--; + if (rv->popcnt == 0) { + LIST_REMOVE(rv, objq); + rv->object = NULL; + vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER); + vm_reserv_freed++; + } else { + rv->inpartpopq = TRUE; + TAILQ_INSERT_HEAD(&vm_rvq_partpop, rv, partpopq); + } +} + +/* + * Returns the reservation to which the given page might belong. + */ +static __inline vm_reserv_t +vm_reserv_from_page(vm_page_t m) +{ + + return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]); +} + +/* + * Returns TRUE if the given reservation contains the given page index and + * FALSE otherwise. + */ +static __inline boolean_t +vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex) +{ + + return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0); +} + +/* + * Increases the given reservation's population count. Moves the reservation + * to the tail of the partially-populated reservation queue. + * + * The free page queue must be locked. + */ +static void +vm_reserv_populate(vm_reserv_t rv) +{ + + mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); + KASSERT(rv->object != NULL, + ("vm_reserv_populate: reserv %p is free", rv)); + KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES, + ("vm_reserv_populate: reserv %p is already full", rv)); + if (rv->inpartpopq) { + TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq); + rv->inpartpopq = FALSE; + } + rv->popcnt++; + if (rv->popcnt < VM_LEVEL_0_NPAGES) { + rv->inpartpopq = TRUE; + TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq); + } +} + +/* + * Allocates a page from an existing or newly-created reservation. + * + * The object and free page queue must be locked. + */ +vm_page_t +vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex) +{ + vm_page_t m, mpred, msucc; + vm_pindex_t first, leftcap, rightcap; + vm_reserv_t rv; + + mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); + + /* + * Is a reservation fundamentally not possible? + */ + VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); + if (pindex < VM_RESERV_INDEX(object, pindex) || + pindex >= object->size) + return (NULL); + + /* + * Look for an existing reservation. + */ + msucc = NULL; + mpred = object->root; + while (mpred != NULL) { + KASSERT(mpred->pindex != pindex, + ("vm_reserv_alloc_page: pindex already allocated")); + rv = vm_reserv_from_page(mpred); + if (rv->object == object && vm_reserv_has_pindex(rv, pindex)) { + m = &rv->pages[VM_RESERV_INDEX(object, pindex)]; + /* Handle vm_page_rename(m, new_object, ...). */ + if ((m->flags & (PG_CACHED | PG_FREE)) == 0) + return (NULL); + vm_reserv_populate(rv); + return (m); + } else if (mpred->pindex < pindex) { + if (msucc != NULL || + (msucc = TAILQ_NEXT(mpred, listq)) == NULL) + break; + KASSERT(msucc->pindex != pindex, + ("vm_reserv_alloc_page: pindex already allocated")); + rv = vm_reserv_from_page(msucc); + if (rv->object == object && + vm_reserv_has_pindex(rv, pindex)) { + m = &rv->pages[VM_RESERV_INDEX(object, pindex)]; + /* Handle vm_page_rename(m, new_object, ...). 
*/ + if ((m->flags & (PG_CACHED | PG_FREE)) == 0) + return (NULL); + vm_reserv_populate(rv); + return (m); + } else if (pindex < msucc->pindex) + break; + } else if (msucc == NULL) { + msucc = mpred; + mpred = TAILQ_PREV(msucc, pglist, listq); + continue; + } + msucc = NULL; + mpred = object->root = vm_page_splay(pindex, object->root); + } + + /* + * Determine the first index to the left that can be used. + */ + if (mpred == NULL) + leftcap = 0; + else if ((rv = vm_reserv_from_page(mpred))->object != object) + leftcap = mpred->pindex + 1; + else + leftcap = rv->pindex + VM_LEVEL_0_NPAGES; + + /* + * Determine the first index to the right that cannot be used. + */ + if (msucc == NULL) + rightcap = pindex + VM_LEVEL_0_NPAGES; + else if ((rv = vm_reserv_from_page(msucc))->object != object) + rightcap = msucc->pindex; + else + rightcap = rv->pindex; + + /* + * Determine if a reservation fits between the first index to + * the left that can be used and the first index to the right + * that cannot be used. + */ + first = pindex - VM_RESERV_INDEX(object, pindex); + if (first < leftcap || first + VM_LEVEL_0_NPAGES > rightcap) + return (NULL); + + /* + * Would a new reservation extend past the end of the given object? + */ + if (object->size < first + VM_LEVEL_0_NPAGES) { + /* + * Don't allocate a new reservation if the object is a vnode or + * backed by another object that is a vnode. + */ + if (object->type == OBJT_VNODE || + (object->backing_object != NULL && + object->backing_object->type == OBJT_VNODE)) + return (NULL); + /* Speculate that the object may grow. */ + } + + /* + * Allocate a new reservation. + */ + m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER); + if (m != NULL) { + rv = vm_reserv_from_page(m); + KASSERT(rv->pages == m, + ("vm_reserv_alloc_page: reserv %p's pages is corrupted", + rv)); + KASSERT(rv->object == NULL, + ("vm_reserv_alloc_page: reserv %p isn't free", rv)); + LIST_INSERT_HEAD(&object->rvq, rv, objq); + rv->object = object; + rv->pindex = first; + KASSERT(rv->popcnt == 0, + ("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", + rv)); + KASSERT(!rv->inpartpopq, + ("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", + rv)); + vm_reserv_populate(rv); + m = &rv->pages[VM_RESERV_INDEX(object, pindex)]; + } + return (m); +} + +/* + * Breaks all reservations belonging to the given object. + */ +void +vm_reserv_break_all(vm_object_t object) +{ + vm_reserv_t rv; + int i; + + mtx_lock(&vm_page_queue_free_mtx); + while ((rv = LIST_FIRST(&object->rvq)) != NULL) { + KASSERT(rv->object == object, + ("vm_reserv_break_all: reserv %p is corrupted", rv)); + if (rv->inpartpopq) { + TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq); + rv->inpartpopq = FALSE; + } + LIST_REMOVE(rv, objq); + rv->object = NULL; + for (i = 0; i < VM_LEVEL_0_NPAGES; i++) { + if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0) + vm_phys_free_pages(&rv->pages[i], 0); + else + rv->popcnt--; + } + KASSERT(rv->popcnt == 0, + ("vm_reserv_break_all: reserv %p's popcnt is corrupted", + rv)); + vm_reserv_broken++; + } + mtx_unlock(&vm_page_queue_free_mtx); +} + +/* + * Frees the given page if it belongs to a reservation. Returns TRUE if the + * page is freed and FALSE otherwise. + * + * The free page queue lock must be held. 
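
/*
 * [Editor's illustrative sketch, not part of the commit.]  Models the fit
 * test in vm_reserv_alloc_page() above: "first" is the pindex at which a
 * reservation around "pindex" would start, and a reservation is only
 * created when [first, first + NPAGES) lies between the caps imposed by
 * the neighboring pages.  The order and the numbers in main() are assumed
 * example values (with color 0, VM_RESERV_INDEX(object, 600) is 88).
 */
#include <stdio.h>

#define LEVEL_0_ORDER	9	/* assumed, illustrative */
#define LEVEL_0_NPAGES	(1 << LEVEL_0_ORDER)

static int
reserv_fits(unsigned long pindex, unsigned long index_in_reserv,
    unsigned long leftcap, unsigned long rightcap)
{
	unsigned long first = pindex - index_in_reserv;

	return (first >= leftcap && first + LEVEL_0_NPAGES <= rightcap);
}

int
main(void)
{
	/*
	 * pindex 600 at in-reservation index 88 gives first = 512.  Caps of
	 * [300, 1500) leave room for [512, 1024); a right cap of 900 does not.
	 */
	printf("%d\n", reserv_fits(600, 88, 300, 1500));	/* 1 */
	printf("%d\n", reserv_fits(600, 88, 300, 900));		/* 0 */
	return (0);
}
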
+ */ +boolean_t +vm_reserv_free_page(vm_page_t m) +{ + vm_reserv_t rv; + + mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); + rv = vm_reserv_from_page(m); + if (rv->object != NULL) { + vm_reserv_depopulate(rv); + return (TRUE); + } + return (FALSE); +} + +/* + * Initializes the reservation management system. Specifically, initializes + * the reservation array. + * + * Requires that vm_page_array and first_page are initialized! + */ +void +vm_reserv_init(void) +{ + vm_paddr_t paddr; + int i; + + /* + * Initialize the reservation array. Specifically, initialize the + * "pages" field for every element that has an underlying superpage. + */ + for (i = 0; phys_avail[i + 1] != 0; i += 2) { + paddr = roundup2(phys_avail[i], VM_LEVEL_0_SIZE); + while (paddr + VM_LEVEL_0_SIZE <= phys_avail[i + 1]) { + vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT].pages = + PHYS_TO_VM_PAGE(paddr); + paddr += VM_LEVEL_0_SIZE; + } + } +} + +/* + * Returns a reservation level if the given page belongs to a fully-populated + * reservation and -1 otherwise. + */ +int +vm_reserv_level_iffullpop(vm_page_t m) +{ + vm_reserv_t rv; + + rv = vm_reserv_from_page(m); + return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1); +} + +/* + * Prepare for the reactivation of a cached page. + * + * First, suppose that the given page "m" was allocated individually, i.e., not + * as part of a reservation, and cached. Then, suppose a reservation + * containing "m" is allocated by the same object. Although "m" and the + * reservation belong to the same object, "m"'s pindex may not match the + * reservation's. + * + * The free page queue must be locked. + */ +boolean_t +vm_reserv_reactivate_page(vm_page_t m) +{ + vm_reserv_t rv; + int i, m_index; + + mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); + rv = vm_reserv_from_page(m); + if (rv->object == NULL) + return (FALSE); + KASSERT((m->flags & PG_CACHED) != 0, + ("vm_reserv_reactivate_page: page %p is not cached", m)); + if (m->object == rv->object && + m->pindex - rv->pindex == VM_RESERV_INDEX(m->object, m->pindex)) + vm_reserv_populate(rv); + else { + KASSERT(rv->inpartpopq, + ("vm_reserv_reactivate_page: reserv %p's inpartpopq is FALSE", + rv)); + TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq); + rv->inpartpopq = FALSE; + LIST_REMOVE(rv, objq); + rv->object = NULL; + /* Don't vm_phys_free_pages(m, 0). */ + m_index = m - rv->pages; + for (i = 0; i < m_index; i++) { + if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0) + vm_phys_free_pages(&rv->pages[i], 0); + else + rv->popcnt--; + } + for (i++; i < VM_LEVEL_0_NPAGES; i++) { + if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0) + vm_phys_free_pages(&rv->pages[i], 0); + else + rv->popcnt--; + } + KASSERT(rv->popcnt == 0, + ("vm_reserv_reactivate_page: reserv %p's popcnt is corrupted", + rv)); + vm_reserv_broken++; + } + return (TRUE); +} + +/* + * Breaks the reservation at the head of the partially-populated reservation + * queue, releasing its cached and free pages to the physical memory + * allocator. Returns TRUE if a reservation is broken and FALSE otherwise. + * + * The free page queue lock must be held.
+ */ +boolean_t +vm_reserv_reclaim(void) +{ + vm_reserv_t rv; + int i; + + mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); + if ((rv = TAILQ_FIRST(&vm_rvq_partpop)) != NULL) { + KASSERT(rv->inpartpopq, + ("vm_reserv_reclaim: reserv %p's inpartpopq is corrupted", + rv)); + TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq); + rv->inpartpopq = FALSE; + KASSERT(rv->object != NULL, + ("vm_reserv_reclaim: reserv %p is free", rv)); + LIST_REMOVE(rv, objq); + rv->object = NULL; + for (i = 0; i < VM_LEVEL_0_NPAGES; i++) { + if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0) + vm_phys_free_pages(&rv->pages[i], 0); + else + rv->popcnt--; + } + KASSERT(rv->popcnt == 0, + ("vm_reserv_reclaim: reserv %p's popcnt is corrupted", + rv)); + vm_reserv_reclaimed++; + return (TRUE); + } + return (FALSE); +} + +/* + * Transfers the reservation underlying the given page to a new object. + * + * The object must be locked. + */ +void +vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object, + vm_pindex_t old_object_offset) +{ + vm_reserv_t rv; + + VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED); + rv = vm_reserv_from_page(m); + if (rv->object == old_object) { + mtx_lock(&vm_page_queue_free_mtx); + if (rv->object == old_object) { + LIST_REMOVE(rv, objq); + LIST_INSERT_HEAD(&new_object->rvq, rv, objq); + rv->object = new_object; + rv->pindex -= old_object_offset; + } + mtx_unlock(&vm_page_queue_free_mtx); + } +} + +/* + * Allocates the virtual and physical memory required by the reservation + * management system's data structures, in particular, the reservation array. + */ +vm_paddr_t +vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water) +{ + vm_paddr_t new_end; + size_t size; + + /* + * Calculate the size (in bytes) of the reservation array. Round up + * from "high_water" because every small page is mapped to an element + * in the reservation array based on its physical address. Thus, the + * number of elements in the reservation array can be greater than the + * number of superpages. + */ + size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv); + + /* + * Allocate and map the physical memory for the reservation array. The + * next available virtual address is returned by reference. + */ + new_end = end - round_page(size); + vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end, + VM_PROT_READ | VM_PROT_WRITE); + bzero(vm_reserv_array, size); + + /* + * Return the next available physical address. + */ + return (new_end); +} + +#endif /* VM_NRESERVLEVEL > 0 */ Property changes on: head/sys/vm/vm_reserv.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/vm/vm_reserv.h =================================================================== --- head/sys/vm/vm_reserv.h (nonexistent) +++ head/sys/vm/vm_reserv.h (revision 174982) @@ -0,0 +1,59 @@ +/*- + * Copyright (c) 2002-2006 Rice University + * Copyright (c) 2007 Alan L. Cox + * All rights reserved. + * + * This software was developed for the FreeBSD Project by Alan L. Cox, + * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY + * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +/* + * Superpage reservation management definitions + */ + +#ifndef _VM_RESERV_H_ +#define _VM_RESERV_H_ + +#ifdef _KERNEL + +#if VM_NRESERVLEVEL > 0 + +vm_page_t vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex); +void vm_reserv_break_all(vm_object_t object); +boolean_t vm_reserv_free_page(vm_page_t m); +void vm_reserv_init(void); +int vm_reserv_level_iffullpop(vm_page_t m); +boolean_t vm_reserv_reactivate_page(vm_page_t m); +boolean_t vm_reserv_reclaim(void); +void vm_reserv_rename(vm_page_t m, vm_object_t new_object, + vm_object_t old_object, vm_pindex_t old_object_offset); +vm_paddr_t vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, + vm_paddr_t high_water); + +#endif /* VM_NRESERVLEVEL > 0 */ +#endif /* _KERNEL */ +#endif /* !_VM_RESERV_H_ */ Property changes on: head/sys/vm/vm_reserv.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property
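
/*
 * [Editor's illustrative sketch, not part of the commit.]  The sizing
 * arithmetic in vm_reserv_startup() above: one array element per
 * VM_LEVEL_0_SIZE chunk of physical address space up to "high_water".
 * The 2MB reservation size and the 64-byte stand-in structure size are
 * assumed example values, as is the 4GB high-water mark.
 */
#include <stdio.h>

#define LEVEL_0_SIZE	(2ULL * 1024 * 1024)	/* assumed 2MB reservations */
#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

struct vm_reserv_model { char pad[64]; };	/* stand-in for struct vm_reserv */

int
main(void)
{
	unsigned long long high_water = 4ULL * 1024 * 1024 * 1024;
	unsigned long long entries = howmany(high_water, LEVEL_0_SIZE);

	/* 2048 slots; at 64 bytes apiece the array costs 128KB. */
	printf("%llu entries, %llu bytes\n", entries,
	    entries * (unsigned long long)sizeof(struct vm_reserv_model));
	return (0);
}
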