Index: zh_TW.UTF-8/books/handbook/Makefile =================================================================== --- zh_TW.UTF-8/books/handbook/Makefile +++ zh_TW.UTF-8/books/handbook/Makefile @@ -1,9 +1,18 @@ # # $FreeBSD$ -# Original revision: 1.108 # -# Build the FreeBSD Handbook. +# Build the FreeBSD Handbook (Traditional Chinese). # +# Original revision: r46480 +# + +# ------------------------------------------------------------------------ +# To add a new chapter to the Handbook: +# +# - Update this Makefile, chapters.ent and book.xml +# - Add a descriptive entry for the new chapter in preface/preface.xml +# +# ------------------------------------------------------------------------ .PATH: ${.CURDIR}/../../share/xml/glossary @@ -20,7 +29,63 @@ IMAGES_EN+= advanced-networking/isdn-twisted-pair.eps IMAGES_EN+= advanced-networking/natd.eps IMAGES_EN+= advanced-networking/net-routing.pic +IMAGES_EN+= advanced-networking/pxe-nfs.png IMAGES_EN+= advanced-networking/static-routes.pic +IMAGES_EN+= bsdinstall/bsdinstall-adduser1.png +IMAGES_EN+= bsdinstall/bsdinstall-adduser2.png +IMAGES_EN+= bsdinstall/bsdinstall-adduser3.png +IMAGES_EN+= bsdinstall/bsdinstall-boot-loader-menu.png +IMAGES_EN+= bsdinstall/bsdinstall-boot-options-menu.png +IMAGES_EN+= bsdinstall/bsdinstall-newboot-loader-menu.png +IMAGES_EN+= bsdinstall/bsdinstall-choose-mode.png +IMAGES_EN+= bsdinstall/bsdinstall-config-components.png +IMAGES_EN+= bsdinstall/bsdinstall-config-hostname.png +IMAGES_EN+= bsdinstall/bsdinstall-config-keymap.png +IMAGES_EN+= bsdinstall/bsdinstall-config-services.png +IMAGES_EN+= bsdinstall/bsdinstall-config-crashdump.png +IMAGES_EN+= bsdinstall/bsdinstall-configure-network-interface-ipv4-dhcp.png +IMAGES_EN+= bsdinstall/bsdinstall-configure-network-interface-ipv4.png +IMAGES_EN+= bsdinstall/bsdinstall-configure-network-interface-ipv4-static.png +IMAGES_EN+= bsdinstall/bsdinstall-configure-network-interface-ipv6.png +IMAGES_EN+= bsdinstall/bsdinstall-configure-network-interface-ipv6-static.png +IMAGES_EN+= bsdinstall/bsdinstall-configure-network-interface-slaac.png +IMAGES_EN+= bsdinstall/bsdinstall-configure-network-interface.png +IMAGES_EN+= bsdinstall/bsdinstall-configure-network-ipv4-dns.png +IMAGES_EN+= bsdinstall/bsdinstall-configure-wireless-accesspoints.png +IMAGES_EN+= bsdinstall/bsdinstall-configure-wireless-scan.png +IMAGES_EN+= bsdinstall/bsdinstall-configure-wireless-wpa2setup.png +IMAGES_EN+= bsdinstall/bsdinstall-distfile-extracting.png +IMAGES_EN+= bsdinstall/bsdinstall-distfile-fetching.png +IMAGES_EN+= bsdinstall/bsdinstall-distfile-verifying.png +IMAGES_EN+= bsdinstall/bsdinstall-final-confirmation.png +IMAGES_EN+= bsdinstall/bsdinstall-finalconfiguration.png +IMAGES_EN+= bsdinstall/bsdinstall-final-modification-shell.png +IMAGES_EN+= bsdinstall/bsdinstall-keymap-10.png +IMAGES_EN+= bsdinstall/bsdinstall-keymap-select-default.png +IMAGES_EN+= bsdinstall/bsdinstall-mainexit.png +IMAGES_EN+= bsdinstall/bsdinstall-netinstall-files.png +IMAGES_EN+= bsdinstall/bsdinstall-netinstall-mirrorselect.png +IMAGES_EN+= bsdinstall/bsdinstall-part-entire-part.png +IMAGES_EN+= bsdinstall/bsdinstall-part-guided-disk.png +IMAGES_EN+= bsdinstall/bsdinstall-part-guided-manual.png +IMAGES_EN+= bsdinstall/bsdinstall-part-manual-addpart.png +IMAGES_EN+= bsdinstall/bsdinstall-part-manual-create.png +IMAGES_EN+= bsdinstall/bsdinstall-part-manual-partscheme.png +IMAGES_EN+= bsdinstall/bsdinstall-part-review.png +IMAGES_EN+= bsdinstall/bsdinstall-post-root-passwd.png +IMAGES_EN+= 
bsdinstall/bsdinstall-set-clock-local-utc.png +IMAGES_EN+= bsdinstall/bsdinstall-timezone-confirm.png +IMAGES_EN+= bsdinstall/bsdinstall-timezone-country.png +IMAGES_EN+= bsdinstall/bsdinstall-timezone-region.png +IMAGES_EN+= bsdinstall/bsdinstall-timezone-zone.png +IMAGES_EN+= bsdinstall/bsdinstall-zfs-disk_info.png +IMAGES_EN+= bsdinstall/bsdinstall-zfs-disk_select.png +IMAGES_EN+= bsdinstall/bsdinstall-zfs-geli_password.png +IMAGES_EN+= bsdinstall/bsdinstall-zfs-menu.png +IMAGES_EN+= bsdinstall/bsdinstall-zfs-partmenu.png +IMAGES_EN+= bsdinstall/bsdinstall-zfs-vdev_invalid.png +IMAGES_EN+= bsdinstall/bsdinstall-zfs-vdev_type.png +IMAGES_EN+= bsdinstall/bsdinstall-zfs-warning.png IMAGES_EN+= geom/striping.pic IMAGES_EN+= install/adduser1.scr IMAGES_EN+= install/adduser2.scr @@ -28,6 +93,7 @@ IMAGES_EN+= install/boot-loader-menu.scr IMAGES_EN+= install/boot-mgr.scr IMAGES_EN+= install/config-country.scr +IMAGES_EN+= install/config-keymap.scr IMAGES_EN+= install/console-saver1.scr IMAGES_EN+= install/console-saver2.scr IMAGES_EN+= install/console-saver3.scr @@ -104,13 +170,6 @@ IMAGES_EN+= security/ipsec-crypt-pkt.pic IMAGES_EN+= security/ipsec-encap-pkt.pic IMAGES_EN+= security/ipsec-out-pkt.pic -IMAGES_EN+= vinum/vinum-concat.pic -IMAGES_EN+= vinum/vinum-mirrored-vol.pic -IMAGES_EN+= vinum/vinum-raid10-vol.pic -IMAGES_EN+= vinum/vinum-raid5-org.pic -IMAGES_EN+= vinum/vinum-simple-vol.pic -IMAGES_EN+= vinum/vinum-striped-vol.pic -IMAGES_EN+= vinum/vinum-striped.pic IMAGES_EN+= virtualization/parallels-freebsd1.png IMAGES_EN+= virtualization/parallels-freebsd2.png IMAGES_EN+= virtualization/parallels-freebsd3.png @@ -175,7 +234,9 @@ # XML content SRCS+= audit/chapter.xml SRCS+= book.xml +SRCS+= bsdinstall/chapter.xml SRCS+= colophon.xml +SRCS+= dtrace/chapter.xml SRCS+= advanced-networking/chapter.xml SRCS+= basics/chapter.xml SRCS+= bibliography/chapter.xml @@ -186,6 +247,8 @@ SRCS+= disks/chapter.xml SRCS+= eresources/chapter.xml SRCS+= firewalls/chapter.xml +SRCS+= zfs/chapter.xml +SRCS+= filesystems/chapter.xml SRCS+= geom/chapter.xml SRCS+= install/chapter.xml SRCS+= introduction/chapter.xml @@ -205,8 +268,6 @@ SRCS+= printing/chapter.xml SRCS+= security/chapter.xml SRCS+= serialcomms/chapter.xml -SRCS+= users/chapter.xml -SRCS+= vinum/chapter.xml SRCS+= virtualization/chapter.xml SRCS+= x11/chapter.xml @@ -230,8 +291,6 @@ XMLDOCS= lastmod:::mirrors.lastmod.inc \ mirrors-ftp-index:::mirrors.xml.ftp.index.inc \ mirrors-ftp:::mirrors.xml.ftp.inc \ - mirrors-cvsup-index:::mirrors.xml.cvsup.index.inc \ - mirrors-cvsup:::mirrors.xml.cvsup.inc \ eresources-index:::eresources.xml.www.index.inc \ eresources:::eresources.xml.www.inc DEPENDSET.DEFAULT= transtable mirror @@ -245,12 +304,6 @@ PARAMS.mirrors-ftp+= --param 'type' "'ftp'" \ --param 'proto' "'ftp'" \ --param 'target' "'handbook/mirrors/chapter.xml'" -PARAMS.mirrors-cvsup-index+= --param 'type' "'cvsup'" \ - --param 'proto' "'cvsup'" \ - --param 'target' "'index'" -PARAMS.mirrors-cvsup+= --param 'type' "'cvsup'" \ - --param 'proto' "'cvsup'" \ - --param 'target' "'handbook/mirrors/chapter.xml'" PARAMS.eresources-index+= --param 'type' "'www'" \ --param 'proto' "'http'" \ --param 'target' "'index'" @@ -261,8 +314,6 @@ SRCS+= mirrors.lastmod.inc \ mirrors.xml.ftp.inc \ mirrors.xml.ftp.index.inc \ - mirrors.xml.cvsup.inc \ - mirrors.xml.cvsup.index.inc \ eresources.xml.www.inc \ eresources.xml.www.index.inc Index: zh_TW.UTF-8/books/handbook/basics/chapter.xml =================================================================== --- 
zh_TW.UTF-8/books/handbook/basics/chapter.xml +++ zh_TW.UTF-8/books/handbook/basics/chapter.xml @@ -1,19 +1,26 @@ - - UNIX 基礎概念 + + + UNIX 基礎概念 概述 @@ -29,44 +36,61 @@ 如何使用 FreeBSD 的virtual consoles + &unix; 檔案權限運作的方式以及 &os; 中檔案的 flags。 + 預設的 &os; 檔案系統配置。 + &os; 的磁碟結構。 + 如何掛載(mount)、卸載(umount)檔案系統 + 什麼是processes、daemons 以及 signals 。 + 什麼是 shell ,以及如何變更您預設的登入環境。 + 如何使用基本的文字編輯器。 + 什麼是 devices 和 device nodes 。 + &os; 下使用的 binary 格式。 + 如何閱讀 manual pages 以獲得更多的資訊。 - Virtual Consoles 和終端機 - virtual consoles - terminals + + + virtual consoles + + + terminals + + + console + 有很多方法可以操作 FreeBSD ,其中一種就是在文字終端機上打字。 如此使用 FreeBSD 即可輕易的體會到 &unix; 作業系統的威力和彈性。 @@ -279,6 +303,798 @@ + + Users and Basic Account Management + + &os; allows multiple users to use the computer at the same + time. While only one user can sit in front of the screen and + use the keyboard at any one time, any number of users can log + in to the system through the network. To use the system, each + user should have their own user account. + + This chapter describes: + + + + The different types of user accounts on a + &os; system. + + + + How to add, remove, and modify user accounts. + + + + How to set limits to control the + resources that users and + groups are allowed to access. + + + + How to create groups and add users as members of a + group. + + + + + Account Types + + Since all access to the &os; system is achieved using + accounts and all processes are run by users, user and account + management is important. + + There are three main types of accounts: system accounts, + user accounts, and the superuser account. + + + System Accounts + + + accounts + system + + + System accounts are used to run services such as DNS, + mail, and web servers. The reason for this is security; if + all services ran as the superuser, they could act without + restriction. + + + accounts + daemon + + + accounts + operator + + + Examples of system accounts are + daemon, + operator, + bind, + news, and + www. + + + accounts + nobody + + + nobody is the + generic unprivileged system account. However, the more + services that use + nobody, the more + files and processes that user will become associated with, + and hence the more privileged that user becomes. + + + + User Accounts + + + accounts + user + + + User accounts are assigned to real people and are used + to log in and use the system. Every person accessing the + system should have a unique user account. This allows the + administrator to find out who is doing what and prevents + users from clobbering the settings of other users. + + Each user can set up their own environment to + accommodate their use of the system, by configuring their + default shell, editor, key bindings, and language + settings. + + Every user account on a &os; system has certain + information associated with it: + + + + User name + + + The user name is typed at the + login: prompt. Each user must have + a unique user name. There are a number of rules for + creating valid user names which are documented in + &man.passwd.5;. It is recommended to use user names + that consist of eight or fewer, all lower case + characters in order to maintain backwards + compatibility with applications. + + + + + Password + + + Each account has an associated password. + + + + + User ID (UID) + + + The User ID (UID) is a number + used to uniquely identify the user to the &os; system. + Commands that allow a user name to be specified will + first convert it to the UID. 
It is + recommended to use a UID less than 65535, since higher + values may cause compatibility issues with some + software. + + + + + Group ID (GID) + + + The Group ID (GID) is a number + used to uniquely identify the primary group that the + user belongs to. Groups are a mechanism for + controlling access to resources based on a user's + GID rather than their + UID. This can significantly reduce + the size of some configuration files and allows users + to be members of more than one group. It is + recommended to use a GID of 65535 or lower as higher + GIDs may break some software. + + + + + Login class + + + Login classes are an extension to the group + mechanism that provide additional flexibility when + tailoring the system to different users. Login + classes are discussed further in + . + + + + + Password change time + + + By default, passwords do not expire. However, + password expiration can be enabled on a per-user + basis, forcing some or all users to change their + passwords after a certain amount of time has + elapsed. + + + + + Account expiry time + + + By default, &os; does not expire accounts. When + creating accounts that need a limited lifespan, such + as student accounts in a school, specify the account + expiry date using &man.pw.8;. After the expiry time + has elapsed, the account cannot be used to log in to + the system, although the account's directories and + files will remain. + + + + + User's full name + + + The user name uniquely identifies the account to + &os;, but does not necessarily reflect the user's real + name. Similar to a comment, this information can + contain spaces, uppercase characters, and be more + than 8 characters long. + + + + + Home directory + + + The home directory is the full path to a directory + on the system. This is the user's starting directory + when the user logs in. A common convention is to put + all user home directories under /home/username + or /usr/home/username. + Each user stores their personal files and + subdirectories in their own home directory. + + + + + User shell + + + The shell provides the user's default environment + for interacting with the system. There are many + different kinds of shells and experienced users will + have their own preferences, which can be reflected in + their account settings. + + + + + + + The Superuser Account + + + accounts + superuser (root) + + + The superuser account, usually called + root, is used to + manage the system with no limitations on privileges. For + this reason, it should not be used for day-to-day tasks like + sending and receiving mail, general exploration of the + system, or programming. + + The superuser, unlike other user accounts, can operate + without limits, and misuse of the superuser account may + result in spectacular disasters. User accounts are unable + to destroy the operating system by mistake, so it is + recommended to login as a user account and to only become + the superuser when a command requires extra + privilege. + + Always double and triple-check any commands issued as + the superuser, since an extra space or missing character can + mean irreparable data loss. + + There are several ways to gain superuser privilege. + While one can log in as + root, this is + highly discouraged. + + Instead, use &man.su.1; to become the superuser. If + - is specified when running this command, + the user will also inherit the root user's environment. The + user running this command must be in the + wheel group or + else the command will fail. 
The user must also know the + password for the + root user + account. + + In this example, the user only becomes superuser in + order to run make install as this step + requires superuser privilege. Once the command completes, + the user types exit to leave the + superuser account and return to the privilege of their user + account. + + + Install a Program As the Superuser + + &prompt.user; configure +&prompt.user; make +&prompt.user; su - +Password: +&prompt.root; make install +&prompt.root; exit +&prompt.user; + + + The built-in &man.su.1; framework works well for single + systems or small networks with just one system + administrator. An alternative is to install the + security/sudo package or port. This + software provides activity logging and allows the + administrator to configure which users can run which + commands as the superuser. + + + + + Managing Accounts + + + accounts + modifying + + + &os; provides a variety of different commands to manage + user accounts. The most common commands are summarized in + , followed by some + examples of their usage. See the manual page for each utility + for more details and usage examples. + + + Utilities for Managing User Accounts + + + + + + + + Command + Summary + + + + + &man.adduser.8; + The recommended command-line application for + adding new users. + + + + &man.rmuser.8; + The recommended command-line application for + removing users. + + + + &man.chpass.1; + A flexible tool for changing user database + information. + + + + &man.passwd.1; + The command-line tool to change user + passwords. + + + + &man.pw.8; + A powerful and flexible tool for modifying all + aspects of user accounts. + + + +
+ + + <command>adduser</command> + + + accounts + adding + + + adduser + + + /usr/share/skel + + + skeleton directory + + + The recommended program for adding new users is + &man.adduser.8;. When a new user is added, this program + automatically updates /etc/passwd and + /etc/group. It also creates a home + directory for the new user, copies in the default + configuration files from + /usr/share/skel, and can optionally + mail the new user a welcome message. This utility must be + run as the superuser. + + The &man.adduser.8; utility is interactive and walks + through the steps for creating a new user account. As seen + in , either input + the required information or press Return + to accept the default value shown in square brackets. + In this example, the user has been invited into the + wheel group, + allowing them to become the superuser with &man.su.1;. + When finished, the utility will prompt to either + create another user or to exit. + + + Adding a User on &os; + + &prompt.root; adduser +Username: jru +Full name: J. Random User +Uid (Leave empty for default): +Login group [jru]: +Login group is jru. Invite jru into other groups? []: wheel +Login class [default]: +Shell (sh csh tcsh zsh nologin) [sh]: zsh +Home directory [/home/jru]: +Home directory permissions (Leave empty for default): +Use password-based authentication? [yes]: +Use an empty password? (yes/no) [no]: +Use a random password? (yes/no) [no]: +Enter password: +Enter password again: +Lock out the account after creation? [no]: +Username : jru +Password : **** +Full Name : J. Random User +Uid : 1001 +Class : +Groups : jru wheel +Home : /home/jru +Shell : /usr/local/bin/zsh +Locked : no +OK? (yes/no): yes +adduser: INFO: Successfully added (jru) to the user database. +Add another user? (yes/no): no +Goodbye! +&prompt.root; + + + + Since the password is not echoed when typed, be + careful to not mistype the password when creating the user + account. + + + + + <command>rmuser</command> + + + rmuser + + + accounts + removing + + + To completely remove a user from the system, run + &man.rmuser.8; as the superuser. This command performs the + following steps: + + + + Removes the user's &man.crontab.1; entry, if one + exists. + + + + Removes any &man.at.1; jobs belonging to the + user. + + + + Kills all processes owned by the user. + + + + Removes the user from the system's local password + file. + + + + Optionally removes the user's home directory, if it + is owned by the user. + + + + Removes the incoming mail files belonging to the + user from /var/mail. + + + + Removes all files owned by the user from temporary + file storage areas such as + /tmp. + + + + Finally, removes the username from all groups to + which it belongs in /etc/group. If + a group becomes empty and the group name is the same as + the username, the group is removed. This complements + the per-user unique groups created by + &man.adduser.8;. + + + + &man.rmuser.8; cannot be used to remove superuser + accounts since that is almost always an indication of + massive destruction. + + By default, an interactive mode is used, as shown + in the following example. + + + <command>rmuser</command> Interactive Account + Removal + + &prompt.root; rmuser jru +Matching password entry: +jru:*:1001:1001::0:0:J. Random User:/home/jru:/usr/local/bin/zsh +Is this the entry you wish to remove? y +Remove user's home directory (/home/jru)? y +Removing user (jru): mailspool home passwd. 
+&prompt.root; + + + + + <command>chpass</command> + + + chpass + + + Any user can use &man.chpass.1; to change their default + shell and personal information associated with their user + account. The superuser can use this utility to change + additional account information for any user. + + When passed no options, aside from an optional username, + &man.chpass.1; displays an editor containing user + information. When the user exits from the editor, the user + database is updated with the new information. + + + This utility will prompt for the user's password when + exiting the editor, unless the utility is run as the + superuser. + + + In , the + superuser has typed chpass jru and is + now viewing the fields that can be changed for this user. + If jru runs this + command instead, only the last six fields will be displayed + and available for editing. This is shown in + . + + + Using <command>chpass</command> as + Superuser + + #Changing user database information for jru. +Login: jru +Password: * +Uid [#]: 1001 +Gid [# or name]: 1001 +Change [month day year]: +Expire [month day year]: +Class: +Home directory: /home/jru +Shell: /usr/local/bin/zsh +Full Name: J. Random User +Office Location: +Office Phone: +Home Phone: +Other information: + + + + Using <command>chpass</command> as Regular + User + + #Changing user database information for jru. +Shell: /usr/local/bin/zsh +Full Name: J. Random User +Office Location: +Office Phone: +Home Phone: +Other information: + + + + The commands &man.chfn.1; and &man.chsh.1; are links + to &man.chpass.1;, as are &man.ypchpass.1;, + &man.ypchfn.1;, and &man.ypchsh.1;. Since + NIS support is automatic, specifying + the yp before the command is not + necessary. How to configure NIS is covered in . + + + + + <command>passwd</command> + + + passwd + + + accounts + changing password + + + Any user can easily change their password using + &man.passwd.1;. To prevent accidental or unauthorized + changes, this command will prompt for the user's original + password before a new password can be set: + + + Changing Your Password + + &prompt.user; passwd +Changing local password for jru. +Old password: +New password: +Retype new password: +passwd: updating the database... +passwd: done + + + The superuser can change any user's password by + specifying the username when running &man.passwd.1;. When + this utility is run as the superuser, it will not prompt for + the user's current password. This allows the password to be + changed when a user cannot remember the original + password. + + + Changing Another User's Password as the + Superuser + + &prompt.root; passwd jru +Changing local password for jru. +New password: +Retype new password: +passwd: updating the database... +passwd: done + + + + As with &man.chpass.1;, &man.yppasswd.1; is a link to + &man.passwd.1;, so NIS works with + either command. + + + + + <command>pw</command> + + + pw + + + The &man.pw.8; utility can create, remove, + modify, and display users and groups. It functions as a + front end to the system user and group files. &man.pw.8; + has a very powerful set of command line options that make it + suitable for use in shell scripts, but new users may find it + more complicated than the other commands presented in this + section. + +
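	Because &man.pw.8; is non-interactive, it lends itself to
	scripted account management.  The following is a minimal
	sketch only, not part of the original patch; the user name,
	comment, shell, and group are hypothetical placeholders, and
	the exact flags should be confirmed against &man.pw.8;:

&prompt.root; pw useradd jsmith -c "Jane Smith" -m -s /bin/sh -G wheel
&prompt.root; pw usershow jsmith
jsmith:*:1002:1002::0:0:Jane Smith:/home/jsmith:/bin/sh
&prompt.root; pw userdel jsmith -r

	In this sketch, -m creates the home directory, -s sets the
	login shell, and -G adds the user to a secondary group; -r
	used with userdel also removes the home directory.  A
	password would still need to be set afterwards with
	&man.passwd.1;.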
+ + + Managing Groups + + + groups + + + /etc/groups + + + accounts + groups + + + A group is a list of users. A group is identified by its + group name and GID. In &os;, the kernel + uses the UID of a process, and the list of + groups it belongs to, to determine what the process is allowed + to do. Most of the time, the GID of a user + or process usually means the first group in the list. + + The group name to GID mapping is listed + in /etc/group. This is a plain text file + with four colon-delimited fields. The first field is the + group name, the second is the encrypted password, the third + the GID, and the fourth the comma-delimited + list of members. For a more complete description of the + syntax, refer to &man.group.5;. + + The superuser can modify /etc/group + using a text editor. Alternatively, &man.pw.8; can be used to + add and edit groups. For example, to add a group called + teamtwo and then + confirm that it exists: + + + Adding a Group Using &man.pw.8; + + &prompt.root; pw groupadd teamtwo +&prompt.root; pw groupshow teamtwo +teamtwo:*:1100: + + + In this example, 1100 is the + GID of + teamtwo. Right + now, teamtwo has no + members. This command will add + jru as a member of + teamtwo. + + + Adding User Accounts to a New Group Using + &man.pw.8; + + &prompt.root; pw groupmod teamtwo -M jru +&prompt.root; pw groupshow teamtwo +teamtwo:*:1100:jru + + + The argument to is a comma-delimited + list of users to be added to a new (empty) group or to replace + the members of an existing group. To the user, this group + membership is different from (and in addition to) the user's + primary group listed in the password file. This means that + the user will not show up as a member when using + with &man.pw.8;, but will show up + when the information is queried via &man.id.1; or a similar + tool. When &man.pw.8; is used to add a user to a group, it + only manipulates /etc/group and does not + attempt to read additional data from + /etc/passwd. + + + Adding a New Member to a Group Using &man.pw.8; + + &prompt.root; pw groupmod teamtwo -m db +&prompt.root; pw groupshow teamtwo +teamtwo:*:1100:jru,db + + + In this example, the argument to is a + comma-delimited list of users who are to be added to the + group. Unlike the previous example, these users are appended + to the group and do not replace existing users in the + group. + + + Using &man.id.1; to Determine Group Membership + + &prompt.user; id jru +uid=1001(jru) gid=1001(jru) groups=1001(jru), 1100(teamtwo) + + + In this example, + jru is a member of + the groups jru and + teamtwo. + + For more information about this command and the format of + /etc/group, refer to &man.pw.8; and + &man.group.5;. + +
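	To complete the picture, the member list of the same
	hypothetical teamtwo group can be
	replaced, or the group removed entirely.  This is an
	illustrative sketch, not part of the original patch; as
	noted above, -M replaces the full member list:

&prompt.root; pw groupmod teamtwo -M db
&prompt.root; pw groupshow teamtwo
teamtwo:*:1100:db
&prompt.root; pw groupdel teamtwo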
+ 權限 UNIX Index: zh_TW.UTF-8/books/handbook/book.xml =================================================================== --- zh_TW.UTF-8/books/handbook/book.xml +++ zh_TW.UTF-8/books/handbook/book.xml @@ -1,24 +1,32 @@ -%chapters; - -%txtfiles; -]> - - FreeBSD 使用手冊 - - FreeBSD 文件計畫 + +%chapters; + +%txtfiles; +]> + + + + + FreeBSD 使用手冊 + + + FreeBSD 文件計畫 + - February 1999 + $FreeBSD$ $FreeBSD$ @@ -37,6 +45,12 @@ 2006 2007 2008 + 2009 + 2010 + 2011 + 2012 + 2013 + 2014 FreeBSD 文件計畫 @@ -50,9 +64,8 @@ &tm-attrib.adaptec; &tm-attrib.adobe; &tm-attrib.apple; - &tm-attrib.corel; &tm-attrib.creative; - &tm-attrib.cvsup; + &tm-attrib.google; &tm-attrib.heidelberger; &tm-attrib.ibm; &tm-attrib.ieee; @@ -60,19 +73,12 @@ &tm-attrib.intuit; &tm-attrib.linux; &tm-attrib.lsilogic; - &tm-attrib.m-systems; - &tm-attrib.macromedia; &tm-attrib.microsoft; - &tm-attrib.netscape; - &tm-attrib.nexthop; &tm-attrib.opengroup; &tm-attrib.oracle; - &tm-attrib.powerquest; &tm-attrib.realnetworks; &tm-attrib.redhat; - &tm-attrib.sap; &tm-attrib.sun; - &tm-attrib.symantec; &tm-attrib.themathworks; &tm-attrib.thomson; &tm-attrib.usrobotics; @@ -86,6 +92,7 @@ 歡迎使用FreeBSD! 本使用手冊涵蓋範圍包括了 + FreeBSD &rel3.current;-RELEASEFreeBSD &rel2.current;-RELEASEFreeBSD &rel.current;-RELEASE 的安裝和日常使用。 這份使用手冊是很多人的集體創作,而且仍然『持續不斷』的進行中。 @@ -140,6 +147,7 @@ &chap.introduction; + &chap.bsdinstall; &chap.install; &chap.basics; &chap.ports; @@ -177,7 +185,6 @@ 這些章節中有些需要您預先閱讀些相關文件,在各章節開頭的概要內會提及。 - &chap.desktop; @@ -202,17 +209,18 @@ &chap.config; &chap.boot; - &chap.users; &chap.security; &chap.jails; &chap.mac; &chap.audit; &chap.disks; &chap.geom; - &chap.vinum; + &chap.zfs; + &chap.filesystems; &chap.virtualization; &chap.l10n; &chap.cutting-edge; + &chap.dtrace; Index: zh_TW.UTF-8/books/handbook/bsdinstall/Makefile =================================================================== --- /dev/null +++ zh_TW.UTF-8/books/handbook/bsdinstall/Makefile @@ -0,0 +1,15 @@ +# +# Build the Handbook with just the content from this chapter. +# +# $FreeBSD: head/en_US.ISO8859-1/books/handbook/bsdinstall/Makefile 39631 2012-10-01 09:53:01Z gabor $ +# + +CHAPTERS= bsdinstall/chapter.xml + +VPATH= .. + +MASTERDOC= ${.CURDIR}/../${DOC}.${DOCBOOKSUFFIX} + +DOC_PREFIX?= ${.CURDIR}/../../../.. + +.include "../Makefile" Index: zh_TW.UTF-8/books/handbook/chapters.ent =================================================================== --- zh_TW.UTF-8/books/handbook/chapters.ent +++ zh_TW.UTF-8/books/handbook/chapters.ent @@ -8,7 +8,7 @@ Chapters should be listed in the order in which they are referenced. $FreeBSD$ - Original revision: 1.33 + Original revision: r45602 --> @@ -17,6 +17,7 @@ + @@ -31,15 +32,14 @@ - + - @@ -55,14 +55,12 @@ - - + + - - - + Index: zh_TW.UTF-8/books/handbook/colophon.xml =================================================================== --- zh_TW.UTF-8/books/handbook/colophon.xml +++ zh_TW.UTF-8/books/handbook/colophon.xml @@ -2,20 +2,19 @@ - + + This book is the combined work of hundreds of contributors to The FreeBSD Documentation Project. The text is - authored in SGML - according to the DocBook DTD and is formatted from SGML into many - different presentation formats using Jade, - an open source DSSSL - engine. Norm Walsh's DSSSL stylesheets were used with an - additional customization layer to provide the presentation - instructions for Jade. The printed - version of this document would not be possible without Donald - Knuth's &tex; typesetting language, - Leslie Lamport's LaTeX, or Sebastian - Rahtz's JadeTeX macro package. 
+ authored in XML according to the DocBook DTD and is formatted + from XML into many different presentation formats using + XSLT. The printed version of this + document would not be possible without Donald Knuth's + &tex; typesetting language, Leslie + Lamport's LaTeX, or Sebastian Rahtz's + JadeTeX macro package. Index: zh_TW.UTF-8/books/handbook/config/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/config/chapter.xml +++ zh_TW.UTF-8/books/handbook/config/chapter.xml @@ -1,26 +1,49 @@ - - 設定與效能調校(Tuning) + + + + 設定與效能調校(Tuning) + - ChernLeeWritten by + + + Chern + Lee + + Written by + + - MikeSmithBased on a tutorial written by + + + Mike + Smith + + Based on a tutorial written by + + - MattDillonAlso based on tuning(7) written by + + + Matt + Dillon + + Also based on tuning(7) written by + - - 概述 @@ -70,6 +93,7 @@ + 最主要的設定檔 Index: zh_TW.UTF-8/books/handbook/cutting-edge/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/cutting-edge/chapter.xml +++ zh_TW.UTF-8/books/handbook/cutting-edge/chapter.xml @@ -1,1178 +1,1803 @@ - - 更新、升級 FreeBSD + + + + 更新、升級 &os; + - JimMockRestructured, reorganized, and parts updated by + + + Jim + Mock + + Restructured, reorganized, and parts updated + by + + - JordanHubbardOriginal work by - Poul-HenningKamp - JohnPolstra - NikClayton + + + Jordan + Hubbard + + Original work by + + + + + Poul-Henning + Kamp + + + + + + John + Polstra + + + + + + Nik + Clayton + + - - - - + 概述 - &os; 是個持續發展的作業系統。對於喜歡追求新鮮、刺激的使用者而言, - 有很多方法可以使您的系統輕鬆更新為最新版。 - 注意:並非每個人都適合這麼做! 本章主要是協助您決定到底要跟開發版本, - 或是要使用較穩定的釋出版。 - + &os; 是個持續發展的作業系統。有些人喜歡官方釋出的版本, +有些人則喜歡和官方最新的開發版本保持同步。然而即使是官方釋出的版本仍然時常有安全性更新或和其他緊急修復。無論使用哪種版本,&os;都提供所有必須的工具來讓系統保持最新,而且可以輕易升級不同版本。本章將描述如何追蹤開發版本,和保持&os;系統維持最新的基本工具。 讀完這章,您將了解︰ - &os.stable; 與 &os.current; 這兩分支的不同之處; + + 如何使用 + freebsd-update, + Subversion, 或 + CTM 讓 &os; 系統保持在最新的版本。 - 如何以 - CSup, - CVSup, - CVS 或 - CTM 來更新你的系統 + + + 如何比較安裝系統和原始複製的狀態。 - 如何以 make buildworld - 等指令來重新編譯、安裝整個 base system。 + + + 如何使用 + Subversion或是documentation + ports來使已安裝的文件保持最新。 + + + + 兩個開發分支的差異:&os.stable; 和 &os.current;。 + + 如何重新編譯和重新安裝整個基礎系統。 + 在開始閱讀這章之前,您需要︰ - 先設好你的網路()。 - + + 設定好你的網路 + ()。 + + + + 知道如何安裝第三方軟體 + (). - 知道如何透過 port/package 安裝軟體()。 + + + 本章中,使用 svn 來獲得和更新 &os; 原始碼。 + 為了能使用他,首先要安裝 devel/subversion + port 或 package。 + - - &os.current; vs. &os.stable; - -CURRENT - -STABLE + + + &os; Update - FreeBSD 有兩個發展分支:&os.current; 及 - &os.stable;。本節將會陸續介紹,並介紹它們分別又是如何更新。 - 首先,先介紹 &os.current;,接著再介紹 &os.stable;。 + + + + Tom + Rhodes + + Written by + + - - 使用最新的 &os; CURRENT + + + + Colin + Percival + + Based on notes provided by + + + - 這裡再次強調,&os.current; 是 &os; 開發的 最前線。 - &os.current; 使用者須有較強的技術能力, - 而且應該要有能力自己解決困難的系統問題。 若您是 &os; 新手, - 那麼請在安裝前最好先三思。 - - - 什麼是 &os.current;? - snapshot - - &os.current; 是 &os; 的最新版。它包含: - 仍在研發階段、實驗性質的修改、過渡時期的機制, - 這些東西在下一次正式 relase 的版本可能會有,也可能不會有的。 - 儘管有許多 &os; 開發者每天都會編譯 &os.current; source code, - 但有時這些原始碼是無法編譯成功。 雖然,這些問題通常會儘快解決, - 但 &os.current; 到底是帶來浩劫或是多了想要用的新功能、改善, - 這點主要取決於您更新原始碼的時機為何而定! - + + Updating and Upgrading + + + freebsd-update + updating-upgrading + - - 誰需要 &os.current;? 
+ 即時應用安全性更新與升級作業系統到新的發行版本對一個持續運作的系統是重要的。&os; 包括一個叫 freebsd-update 的工具程式可以執行這兩項任務。 + 這個工具程式支援 &os; 二進制安全性與和錯誤更新, +不需要手動編譯和安裝修復或新核心。 +安全性團隊目前支援的所有架構和發行版都可以取得二進制更新。 +目前支援的發行版列表和他們的支援期限都列於 + http://www.FreeBSD.org/security/ + + 這個工具程式也支援作業系統升級到次要的發行版本和升級到令一個發行版分支升級到新的發行版本前,要檢查他的發行宣告,因為他包含發行版本相關的重要資訊。發行公告可以由http://www.FreeBSD.org/releases/取得。 + + + 如果有使用crontab來執行 + &man.freebsd-update.8;,那必須在升級作業系統前先停用。 + + + 這節描述 freebsd-update 使用的設定檔, +示範如何運用安全性修補和如何升級到主要或次要的作業系統發行版, +以及討論某些升級作業系統的考量。 + + + 設定檔 + + + freebsd-update預設的設定檔不需變更即可運作。 +有些使用者可能想要調校預設的設定檔 /etc/freebsd-update.conf +來對程序有更好的控制。這個設定檔的註解說明了可以使用的選項, +但以下可能需要更多一些的解釋: + - &os.current; 適合下列這三類人: + # Components of the base system which should be kept updated. +Components world kernel - - - &os; 社群成員:積極專注於 source tree 的某一部份, - 以及認為保持為 current(最新狀態) - 為絕對需求的人。 - + 這個參數控制 &os; 的哪個部份將保持最新。 + 預設是將更新整個 base system 和核心。 +個別元件可以被指定, +例如:src/basesrc/sys。 +然而最好的選項是維持預設設定, +因為改變設定去包括特定項目,則每個需要的項目都必須要列出。 +時間一久可能會因為原始碼和二進制檔案沒有更新而造成慘重的後果。 + + # Paths which start with anything matching an entry in an IgnorePaths +# statement will be ignored. +IgnorePaths /boot/kernel/linker.hints + + 保持特定的目錄,例如 + /bin/sbin, + 在更新過程不被更動,可以將他們的路徑加到此敘述中。 + 這個選項可以防止 freebsd-update覆蓋本機的修改。 + + # Paths which start with anything matching an entry in an UpdateIfUnmodified +# statement will only be updated if the contents of the file have not been +# modified by the user (unless changes are merged; see below). +UpdateIfUnmodified /etc/ /var/ /root/ /.cshrc /.profile + + 這個選項只會更新特定目錄中未修改的設定檔。 + 任何使用者修改的檔案都不會自動更新。 + 有另一個選項── + KeepModifiedMetadata會指示 + freebsd-update 在合併時將改變儲存下來 + + # When upgrading to a new &os; release, files which match MergeChanges +# will have any local changes merged into the version from the new release. +MergeChanges /etc/ /var/named/etc/ /boot/device.hints + + 列出 freebsd-update應嘗試合併的設定檔目錄。 + 檔案合併過程是一系列類似&man.mergemaster.8;的&man.diff.1;修補, +但是選項比較少。 + 合併可以接受,開啟編輯器,或是令freebsd-update中止。 +如果有疑慮,備份 /etc,然後同意合併。 + 更多關於mergemaster的資訊, +參見 。 + - - &os; 社群成員:為了確保 &os.current; - 能夠儘可能地維持在最穩定的狀態, - 而主動花時間解決問題的測試者。 此外,還有對 &os; - 能提出具體建議以及改善方向,並提出 patch 修正檔的人。 - + # Directory in which to store downloaded updates and temporary +# files used by &os; Update. +# WorkDir /var/db/freebsd-update + + 這個目錄是所有修補檔和暫存檔放置處。 + 當使用者進行版本升級時,這個位置應該要有至少1GB的可用磁碟空間。 + + # When upgrading between releases, should the list of Components be +# read strictly (StrictComponents yes) or merely as a list of components +# which *might* be installed of which &os; Update should figure out +# which actually are installed and upgrade those (StrictComponents no)? +# StrictComponents no + + 當這個選項設定為yes, + freebsd-update 將會假設 + Components 列表已完成,將不會嘗試做列表外的改變。 + 實際上 freebsd-update將嘗試更新每一個屬於 + Components 列表的檔案。 + - - 只是關心或者想參考(比如,只是閱讀, - 而非執行)的人。 - 這些人有時也會做些註解,或貢獻原始碼。 - - - + + 運用安全性修補 - - &os.current; <emphasis>並不是</emphasis> 什麼? 
+ 運用 &os; 安全性修補的過程已經被簡化, + 允許系統管理員使用freebsd-update保持系統更新。 + 更多關於&os; 安全性報告的資訊可以參考 + - - - 追求最新功能。 聽說裡面有些很酷的新功能, - 並希望成為您周圍的人中第一個嘗試的人, - 因此將 &os.current; 視為取得搶鮮版的捷徑。 - 儘管,您能夠因此首先瞭解到最新的功能, - 但這也意味著若出現新的 bug 時,您也是首當其衝。 - + &os; 安全性修補可以使用以下指令下載與安裝。 + 第一個指令將決定是否有尚未完成的修補,如果有,將列出執行修補將會變更的檔案清單。第二個指令將會執行修補。 + - - 修復 bug 的速成法。 因為 &os.current; - 的任何版本在修復已知 bug 的同時,又可能會產生新的 bug。 - - + &prompt.root; freebsd-update fetch +&prompt.root; freebsd-update install - - 無所不在的 officially supported。 - 我們會盡力協助上述 &os.current; 的那三種類別的 - legitimate 使用者, - 但我們沒時間為他們提供技術支援。 - 這不代表我們很惡劣,或是不想幫助人(若是的話, - 我們也不會為 &os; 努力了) - ,實在是因為我們分身乏術,無法每天回答數百個問題, - 而同時繼續開發 &os;。 - 可以確定的一點就是, - 在改善 &os; 或是回答大量有關實驗碼的問題之間, - 若要做個選擇的話,開發者會選擇前者。 - - - + 如果更新執行任何核心修補,系統將會重新開機以使用修補過的核心。 +如果在任何執行中的二進位檔進行修補,被影響的應用程式將會重新啟動來使用修補過的二進位檔。 - - 使用 &os.current; + 將以下項目加入 /etc/crontab 系統可以每天自動檢查是否有更新: - - - 加入 &a.current.name;-CURRENTusing 及 &a.cvsall.name; 論壇。 - 這不單只是個建議,也是 必須 作的。 - 若您沒訂閱 &a.current.name; - ,那麼就會錯過別人對目前系統狀態的說明,而枯耗在別人已解的問題。 - 更重要的是,可能會錯失一些對己身所管系統安危相當重要的公告。 - - - 在 &a.cvsall.name; 上則可以看到每個 commit 紀錄, - 因為這些記錄會連帶影響其他相關資訊。 - - 要訂閱這些論壇或其他論壇,請參考 &a.mailman.lists.link; - 並點選想訂閱的部分即可。 至於其他後續步驟如何進行, - 在那裡會有說明。 - + @daily root freebsd-update cron - - 從 &os; mirror 站 - 取得原始碼。 有兩種方式可以達成: + 如果有新的修補,它們將會自動下載,但是還不會執行。 + 管理者root 將會收到email來檢視修補然後手動執行 + freebsd-update install 來安裝 - - - csupcvsup 或 - cvsup 程式搭配位於 - /usr/share/examples/cvsup 檔名為 - standard-supfile 的 - supfile。 - 這是大家最常推薦的方式,因為它可以讓您把整個 tree 都抓回來, - 之後就只取有更新的部分即可。 - 此外,許多人會把 csup 或 - cvsup 放到 - croncron 以定期自動更新。 - 您須要自訂前述的 supfile 範例檔, - 並針對自身網路環境以調整 csup - 或 cvsup-CURRENTSyncing with CVSup 相關設定。 - - - - 使用 CTM-CURRENTSyncing with CTM 工具。 若網路環境不佳 - (上網費用貴,或只能用 email 而已) - CTM 會比較適合您的需求。 - 然而,這也有一些爭議並且常抓到一些有問題的檔案。 因此, - 很少人會用它。 這也註定了不能長期依賴這個更新方式。 - 若是使用 9600 bps modem 或頻寬更大的上網者,建議使用 - CVSup - 。 - - - + 如果有發生任何錯誤,freebsd-update + 可以使用以下指令回溯最後的變更: - - 若抓 source code 是要用來跑的,而不僅只是看看而已, - 那麼就抓 整個 &os.current;,而不要只抓部分。 - 因為大部分的 source code 都會相依到其他 source code 環節部分, - 若是您只編譯其中一部份,保證會很麻煩。 - - 在編譯 &os.current;-CURRENTcompiling 之前,請仔細閱讀 - /usr/src 內的 Makefile。 - 儘管只是升級部分東西而已,您至少也要先 - 裝新的 kernel 以及重新編譯 world。 此外,多多閱讀 - &a.current; 以及 /usr/src/UPDATING - 也是必須的, - 才能知道目前進度是怎樣以及下一版會有什麼新東西。 - + &prompt.root; freebsd-update rollback +Uninstalling updates... done. - - 熱血!若您正在跑 &os.current;, - 我們很想知道您對於它的想法是什麼,尤其是加強哪些功能, - 或該修正哪些錯誤的建議。 如果您在建議時能附上相關程式碼的話, - 那真是太棒了! 
- - - + 再次,如果核心或任何核心模組有變更,系統將重新開機,受影響的二進位檔會重新執行。 + + + 只有 GENERIC 核心可以自動被 + freebsd-update 更新。 + 如果有安裝自訂的核心,在freebsd-update + 完成安裝更新後,將會被重新編譯和重新安裝。 + 然而,如果 /boot/GENERIC 存在, + freebsd-update將會偵測和更新 GENERIC 核心, + 即使他並非目前系統正在執行的核心。 + + + 永遠在 /boot/GENERIC 保留一份 GENERIC + 核心的備份。這對於診斷不同的問題與版本的升級有幫助。 + 參考 關於如何備份 + GENERIC 核心的說明 + + + 除非 /etc/freebsd-update.conf 的預設設定被改變, + freebsd-update將安裝更新過的核心原始碼和其餘的更新 + 然後就可以照平常的方式重新編譯和重新安裝新的自訂核心。 + + freebsd-update 發行的更新並非總是包含核心。 + 如果核心的原始碼沒有被 freebsd-update install 變更,並不需要重新編譯自訂核心。 + 然而 freebsd-update 總是會更新 + /usr/src/sys/conf/newvers.sh。目前修補的程度, + 如同執行 uname -r 顯示的 -p + 數字是由這個檔案取得。 + 即使沒有做任何改變,重新編譯核心會讓 uname 正確地報告目前系統修補的程度。 + 這對於維護多個系統特別有幫助,可以讓你快速評估每個系統安裝的更新。 - - 使用最新的 &os; STABLE + + 執行主要和次要的版本升級 + + 從&os;的次要版本升級到另一個版本,例如從 + &os; 9.0 到 &os; 9.1, 叫作 + 次要版本更新。 + 主要版本更新發生在當 &os; + 從一個主要版本升級到主要版本升級到另一個主要版本時 +,例如從 &os; 9.X 到 &os; 10.X。 +兩種更新都可以透過提供 freebsd-update 發行版本來執行。 + + + 如果系統正在執行自訂的核心,開始升級前, + 確定 GENERIC 核心的副本在 + /boot/GENERIC。 + 參考 關於如何製作 + GENERIC核心副本的說明。 + + + 以下的指令執行在 &os; 9.0 系統時, + 將會把系統升級至 &os; 9.1: + + &prompt.root; freebsd-update -r 9.1-RELEASE upgrade + + 當收到這個指令後, + freebsd-update 將會評估設定檔和目前的系統來收集升級需要的資訊。 + 螢幕上的清單會顯示偵測到或沒偵測到哪些元件。例如: + Looking up update.FreeBSD.org mirrors... 1 mirrors found. +Fetching metadata signature for 9.0-RELEASE from update1.FreeBSD.org... done. +Fetching metadata index... done. +Inspecting system... done. + +The following components of FreeBSD seem to be installed: +kernel/smp src/base src/bin src/contrib src/crypto src/etc src/games +src/gnu src/include src/krb5 src/lib src/libexec src/release src/rescue +src/sbin src/secure src/share src/sys src/tools src/ubin src/usbin +world/base world/info world/lib32 world/manpages + +The following components of FreeBSD do not seem to be installed: +kernel/generic world/catpages world/dict world/doc world/games +world/proflibs + +Does this look reasonable (y/n)? y + + 此時,freebsd-update 將嘗試下載所有升級需要的檔案。 +在某些案例,使用者會被提示一些關於安裝什麼或是如何進行的問題。 + + 當使用自訂核心,上述的步驟將會產生如下列的警告: + + WARNING: This system is running a "MYKERNEL" kernel, which is not a +kernel configuration distributed as part of FreeBSD 9.0-RELEASE. +This kernel will not be updated: you MUST update the kernel manually +before running "/usr/sbin/freebsd-update install" + + 這個警告可以安全地忽略,升級過程將會立即使用更新過的 + GENERIC 核心 + + 一旦所有的修補都被下載到本地的系統, + 它們將會被運用。這個過程可能會花點時間,取決於機器的速度和工作量 + 設定檔將會被合併。 + 合併的過程中當檔案被合併或是手動合併螢幕上出現編輯器時需要使用者介入。 + 每一個成功合併的結果將會顯示給使用者。 + 失敗或是被忽略的合併將會使程序中斷。使用者稍候可能想要備份 + /etc 和手動合併重要的檔案,例如: + master.passwd 或 + group + + + 當所有修補和合併在另一個目錄進行時,系統還不會被警告。 + 一旦所有修補都成功運用,所有設定檔都被合併,而且過程順利,使用者可以以下指令來將這些改變付諸於磁碟上: + + &prompt.root; freebsd-update install + - - 什麼是 &os.stable;? 
- -STABLE - - &os.stable; 是我們的開發分支,主要的發行版就由此而來。 - 這個分支會以不同速度作修改變化,並且假設這些是第一次進入 &os.current; - 進行測試。 然而,這 仍然 屬於開發中的分支, - 也就是說在某些時候,&os.stable; 可能會、也可能不會符合一些特殊需求。 - 它只不過是另一個開發分支而已,可能不太適合一般使用者。 + 核心和核心模組將會先被修補。如果系統正在執行自訂核心,使用 + &man.nextboot.8; 指令設定下次開機的核心為更新過的 + /boot/GENERIC + + &prompt.root; nextboot -k GENERIC + + + 如果機器以遠端遙控來更新, + 使用GENERIC核心重新開機前, + 確定他包含所有系統開機需要的驅動程式而且連接網路, + 特別是當執行的自訂核心包含核心模組提供內建功能時, + 確定暫時地使用 /boot/loader.conf 工具載入這些模組到 GENERIC 核心。 + 建議停用非必須的服務和磁碟與網路掛載直到升級程序完成。 + + + + 機器現在應該更新過的核心重新開機: + + + &prompt.root; shutdown -r now + + 一旦系統重新上線,使用以下指令重新開始 + freebsd-update。 + 因為程序的狀態已被儲存, + freebsd-update 將不會重頭開始,他會進行到下一個階段 + ,移除所有舊的共用程式庫和目標檔。 + + &prompt.root; freebsd-update install + + + 根據程式庫版本編號, 可能有兩個而不是三個安裝階段。 + + + 升級程序現在完成了。如果這是主要的版本升級,參考 + + 的描述重新安裝所有的ports和套件。 + + + &os; 9.X 以上自訂核心 + + 使用 freebsd-update 前,確定有一份核心的副本, ensure + that a copy of the GENERIC kernel + exists in /boot/GENERIC. If a custom + kernel has only been built once, the kernel in + /boot/kernel.old is the + GENERIC kernel. Simply rename this + directory to /boot/kernel. + + If a custom kernel has been built more than once or if + it is unknown how many times the custom kernel has been + built, obtain a copy of the GENERIC + kernel that matches the current version of the operating + system. If physical access to the system is available, a + copy of the GENERIC kernel can be + installed from the installation media: + + &prompt.root; mount /cdrom +&prompt.root; cd /cdrom/usr/freebsd-dist +&prompt.root; tar -C/ -xvf kernel.txz boot/kernel/kernel + + Alternately, the GENERIC kernel may + be rebuilt and installed from source: + + &prompt.root; cd /usr/src +&prompt.root; make kernel __MAKE_CONF=/dev/null SRCCONF=/dev/null + + For this kernel to be identified as the + GENERIC kernel by + freebsd-update, the + GENERIC configuration file must not + have been modified in any way. It is also suggested that + the kernel is built without any other special + options. + + Rebooting into the GENERIC kernel + is not required as freebsd-update only + needs /boot/GENERIC to exist. - - 誰需要 &os.stable;? + + &os; 8.X 自訂核心 - 若您有興趣去追蹤、貢獻 FreeBSD 開發過程或作些貢獻, - 尤其是會跟 FreeBSD 接下來的 關鍵性 發行有關, - 應該考慮採用 &os.stable;。 - - 雖然安全漏洞的修補也會進入 &os.stable; 分支, - 但不必僅僅因此而 需要 去用 &os.stable;。 - FreeBSD 每項 security advisory(安全公告) - 都會解說如何去修復有受到影響的版本 - 然而,這也不一定是正確,我們不可能永遠支援 FreeBSD - 昔日的各種發行版本,儘管每個發行版發佈之後,都仍會持續支援數年之久。 - 若欲瞭解 FreeBSD 目前對於舊版的支援政策細節,請參閱 http://www.FreeBSD.org/security/ - 。 - - ,若僅因為安全因素而去採用開發分支,雖然會解決現有已知問題, - 但也可能帶來一些潛藏的問題。 - - 儘管我們盡力確保 &os.stable; 分支在任何時候均能正確編譯、運作, - 但沒人能夠擔保它隨時都可以符合上述目的。 此外,雖然原始碼在進入 - &os.stable; 之前,都會先在 &os.current; 開發完畢,但使用 &os.current; - 的人畢竟遠比 &os.stable; 使用者來的少,所以通常有些問題,可能在 - &os.current; 比較沒人注意到,隨著 &os.stable; - 使用者的廣泛使用才會浮現。 - - 由於上述這些理由,我們並不推薦 盲目追隨 - &os.stable;,而且更重要的是,別在原始碼尚未經完整測試之前, - 就衝動把 production server 轉移到 &os.stable; 環境。 + On an &os; 8.X system, the instructions for + obtaining or building a GENERIC kernel + differ slightly. + + Assuming physical access to the machine is possible, a + copy of the GENERIC kernel can be + installed from the installation media using the following + commands: + + &prompt.root; mount /cdrom +&prompt.root; cd /cdrom/X.Y-RELEASE/kernels +&prompt.root; ./install.sh GENERIC + + Replace X.Y-RELEASE + with the version of the release being used. The + GENERIC kernel will be installed in + /boot/GENERIC by default. 
+ + To instead build the GENERIC kernel + from source: + + &prompt.root; cd /usr/src +&prompt.root; env DESTDIR=/boot/GENERIC make kernel __MAKE_CONF=/dev/null SRCCONF=/dev/null +&prompt.root; mv /boot/GENERIC/boot/kernel/* /boot/GENERIC +&prompt.root; rm -rf /boot/GENERIC/boot + + For this kernel to be picked up as + GENERIC by + freebsd-update, the + GENERIC configuration file must not + have been modified in any way. It is also suggested that it + is built without any other special options. - 若您沒有這些多的時間、精神的話,那推薦您使用最新的 FreeBSD - 發行版即可,並採用其所提供的 binary 更新機制來完成升級轉移。 + Rebooting into the GENERIC kernel + is not required. - - 使用 &os.stable; + + 主要版本更新後更新 Packages - - - 訂閱 &a.stable.name;-STABLEusing list。 可以讓您隨時瞭解 &os.stable; - 的軟體編譯時的相依關係,以及其他需特別注意的問題。 - 開發者在考慮一些有爭議的修正或更新時,就會先在這裡發信說明, - 給使用者有機會可以反應, - 看他們對所提的更改是否有什麼建議或問題。 + 一般來說,次要版本更新後安裝的應用程式可以沒有問題地繼續執行。 + 主要版本間使用不同的應用程式二進位介面 Application Binary Interfaces + (ABIs),會破壞大部份第三方應用程式。 + 主要版本更新後,所有安裝的套件和 ports 需要使用應用程式來升級,例如 + ports-mgmt/portmaster + 重新編譯所有應用程式,可以使用以下指令完成: - 而 &a.cvsall.name; list 這邊可以看到每個 commit log, - 其中包括了許多中肯的資訊,例如一些可能發生的邊際效應等等。 + &prompt.root; portmaster -af - 想要加入這些通信論壇的話,只要到 &a.mailman.lists.link; - 點下想訂閱的 list 即可。 其餘的步驟在網頁上會有說明。 - + 這個指令將會顯示每個程式的設定選項設定畫面,等待使用者的互動。 + 如果要使用預設的選項,可以在上述指令使用選項。 + - - 若打算要安裝一個全新的系統,並且希望裝 &os.stable; - 每月定期的 snapshot,那麼請參閱 Snapshots 網頁以瞭解相關細節。 - 此外,也可從 mirror 站 - 來安裝最新的 &os.stable; 發行版,並透過下列的的說明來更新到最新的 - &os.stable; 原始碼。 - - 若已裝的是 &os; 以前的版本,而想透過原始碼方式來升級, - 那麼也是可以利用 &os; mirror 站 - 來完成。 以下介紹兩種方式: - - - - csupcvsup 或 - cvsup 程式搭配位於 - /usr/share/examples/cvsup 檔名為 - stable-supfile 的 - supfile。 這是大家最常推薦的方式, - 因為它可以讓你把整個 tree 都抓回來, - 之後就只取有更新的部分即可。 - 此外,許多人會把 csup 或 - cvsup 放到 croncron - 以定期自動更新。 您須要自訂前述的 - supfile 範例檔,並針對自身網路環境以調整 - csup 或 - cvsup-STABLEsyncing with CVSup 相關設定。 - - - - 使用 CTM-STABLEsyncing with CTM 更新工具。 - 若網路不快或網路費用貴,那麼可以考慮採用。 - - - + 一旦軟體升級完成, 執行 + freebsd-update 來完成所有升級過程的零碎事情 : - - 一般而言,若常需存取最新原始碼,而不計較網路頻寬的話, - 可以使用 csupcvsup - 或 ftp。 否則,就考慮 - CTM - + &prompt.root; freebsd-update install - - 在編譯 &os.stable;-STABLEcompiling 之前,請先仔細閱讀 - /usr/src 內的 Makefile - 檔。 儘管只是升級部分東西而已,您至少也要先 裝新的 kernel 以及重新編譯 world。 - 此外,多多閱讀 &a.stable; 以及 - /usr/src/UPDATING 也是必備的, - 這樣才能知道目前進度是怎樣,以及下一版會有哪些新東西。 - - + 如果是暫時使用 GENERIC 核心, + 現在請使用 + 的說明編譯和安裝新的自訂核心。 + + 重新開機進入新版的 &os;。現在已經完成升級過程了。 + - - - 更新你的 Source + + 系統狀態比較 - &os; 計劃原始碼有許多透過網路(或 email)的方式來更新, - 無論是更新那一塊領域,這些全由您自行決定。 我們主要提供的是 Anonymous CVS、CVSup - 、CTM。 + 已安裝的 &os; 版本可以使用 freebsd-update IDS +來跟另一個已知好的複製版本來做測試。 +這個指令評估系統應用程式,程式庫和設定檔案目前的版本, +可以被當成內建的入侵偵測系統來使用 (IDS)。 - - 雖然可以只更新部分原始碼,但唯一支援的更新流程是更新整個 tree, - 並且重編 userland(比如:由使用者去執行的所有程式,像是 - /bin/sbin 內的程式)以及 - kernel 原始碼。 - 若只更新部分的 source tree、或只有 kernel 部分、或只有 userland - 部分,通常會造成一些錯誤,像是:編譯錯誤、kernel panic、資料毀損等 - 。 - + + 這個指令不是取代真正的 + IDS ,例如 + security/snort。當 + freebsd-update 儲存資料在磁碟裡,是有被竄改的可能性 + 可以使用 kern.securelevel + 或是將沒有在使用的 freebsd-update + 的資料儲存在唯讀檔案系統來減少這樣的可能性, + 比較好的解決方法是將系統和安全的磁碟,例如 + DVD 或是安全的外接 + USB 磁碟裝置做比較。 + 另類的方法是使用在 描述的內建應用程式提供的 + IDS 功能 + - - CVS - anonymous - + 為了開始比較,先指定特定的輸出檔案來儲存結果: - Anonymous CVS 及 - CVSup 均是採 pull - 模式來更新原始碼。 以 CVSup 為例, - 使用者(或 cron script)會執行 cvsup - 程式,後者會與某一台 cvsupd 伺服器作些互動, - 以更新相關原始碼檔案。 您所收到更新會是當時最新的, - 而且只會收到需更新的部分。 此外,也可以很輕鬆去設定要更新的範圍。 - 更新會由伺服器跟本機比對之後,丟出當時您所需要的更新檔案給你。 - Anonymous CVS 的概念相對於 - CVSup 來得更簡單些,因為它只是 - CVS 的延伸而已,一樣讓你可從遠端的 - CVS repository 取出最新原始碼。 然而 CVSup - 在這方面會更有效率,不過 Anonymous CVS - 對新手而言,是用起來比較簡單。 + &prompt.root; freebsd-update IDS >> outfile.ids - - CTM - - 另一種方式則是 CTM。 - 它並不是以交談式介面來比對您所擁有的 sources 和伺服器上的 sources - 
或是您取得的更新部份。 相反的,會有一個 script - 檔專門用來辨識變更過的檔案,這個程式是由 CTM 伺服器來執行, - 每天會比對數次,並把兩次執行期間內變更過的檔案加以壓縮, - 並給它們一個序號,然後就加以編碼(只用 printable ASCII 字元), - 並以 email 的方式寄出。 當您收到它的時候,這些 CTM deltas - 就可以由 &man.ctm.rmail.1; 程式來處理,該程式會自動解碼、確認、 - 套用這些變更。 這程序比 CVSup 來說是快得多了, - 而且,這個模式對我們的伺服器來說是比較輕鬆的,因為這是一個 - push 的模式,而非 pull - 的模式。 - - 當然,這樣做也會帶來一些不便。 若不小心把您部份的程式清除掉了, - CVSup 會偵測出來,並自動為您把不足的部份補齊。 - CTM 並不會為您做這些動作。 - 若清掉了您的部份 source (而且沒備份),您可以從頭開始(從最新的 CVS - base delta)並用 CTM 來重建它們 - ,或是用 Anonymous CVS 來完成, - 只要把不正確的地方砍掉,再重新做同步的動作即可。 + 現在系統將會被檢查, + 而包含已知發行版和現在安裝版的SHA256雜湊值 + 的冗長檔案清單將會被送至指定的輸出檔。 + + 清單的項目相當長,但是輸出格式很容易被分析。 + 例如,要獲得一個和發行版不同的檔案清單,可以下以下指令: + + &prompt.root; cat outfile.ids | awk '{ print $1 }' | more +/etc/master.passwd +/etc/motd +/etc/passwd +/etc/pf.conf + + 這個輸出範例已經被截短,原來有更多的檔案存在。 + 有些檔案自然會有修改。例如,如果有使用者被加入系統, + /etc/passwd 會被修改 + 如果 freebsd-update 有更新過,核心模組可能會不同 + 為了要排除特定的檔案或目錄,把它們加到/etc/freebsd-update.conf 裡的 IDSIgnorePaths 選項。 + - - 重新編譯 <quote>world</quote> + + 更新文件組 + + 更新和升級 - Rebuilding world + 文件 + 更新和升級 - 在更新 &os; 的 source tree 到最新之後(無論是 &os.stable;、 - &os.current; 等等),接下來就可以用這些 source tree 來重新編譯系統 - 。 - - 做好備份 + 文件是&os;作業系統不可或缺的一部份。 + 最新版本的 &os; 文件可以在 + &os; 網站取得(http://www.freebsd.org/doc/), + 很方便有一份最新的&os; + 網站,使用手冊, 常見問答和文章的本地端副本。 - 在作任何大動作 之前 - 要記得先把系統作備份的重要性無須強調。 儘管重新編譯 world 是 - (只要有照文件指示去作的話)一件很簡單的事情,但出錯也是在所難免的。 - 另外,別人在 source tree 不慎搞混的錯誤,也可能會造成系統無法開機 - 。 - - 請確認自己已作妥相關備份,並且手邊有 fixit 磁片或開機光碟。 - 您可能永遠也用不到這些東西, - 但安全第一總比事後說抱歉來得好吧! - + 這一節描述如何使用原始碼或是 + &os; Ports 管理機制將 + 本地端&os;文件保持最新。 - - 訂閱相關的 Mailing List + 編輯和發佈文件更正的資訊 + 請參考 &os; 文件計劃新貢獻者入門書 + (http://www.freebsd.org/doc/en_US.ISO8859-1/books/fdp-primer/). - mailing list - &os.stable; 以及 &os.current; 分支,本質上就是屬於 - 開發階段。 為 &os; 作貢獻的也都是人,偶爾也會犯錯誤。 - - 有時候這些錯誤並無大礙,只是會讓系統產生新的錯誤警告而已。 - 有時則是災難,可能會導致不能開機或檔案系統的毀損(或更糟)。 - - 若遇到類似問題,貼封標題為 heads up(注意) - 開頭的信到相關的 mailing list,並講清楚問題點以及會影響哪些系統。 - 在問題獲解決後,再貼標題為 all clear(已解決) - 開頭的聲明信。 + + 從原始碼更新文件 - 若用的是 &os.stable; 或 &os.current;,卻又不閱讀 &a.stable; 或 - &a.current; 的討論,那麼會是自找麻煩而已。 - + 重新從原始碼編譯&os; 文件需要一些不是屬於 + &os; 基礎系統的工具 + 這些需要的工具包括 + svn 可以從 + textproc/docproj 套件安裝或是 + &os; 文件計劃的開發的port - - 不要用 <command>make world</command> + 一旦安裝好,請使用 svn + 來取得乾淨的文件原始碼副本。 + 將 https://svn0.us-west.FreeBSD.org + 置換成 裡地理位置和你最近的鏡像站: - 一堆早期的舊文件都會建議說使用 make world。 - 這樣做會跳過一些重要步驟,建議只有在你知道自己在作什麼,再這麼做。 - 在絕大多數的情況下,請不要亂用 make world, - 而該改用下面介紹的方式。 - + &prompt.root; svn checkout https://svn0.us-west.FreeBSD.org/doc/head /usr/doc - - 更新系統的標準方式 + 第一次下載文件原始碼需要花點時間,請讓他執行完畢 - 要升級系統前,一定要先查閱 /usr/src/UPDATING - 文件,以瞭解 buildworld 之前需要作哪些事情或注意事項, - 然後才用下列步驟: - - &prompt.root; make buildworld -&prompt.root; make buildkernel -&prompt.root; make installkernel -&prompt.root; reboot + 將來文件原始碼更新的取得可以執行: - - 在少數狀況,可能需要先在 buildworld - 步驟之前先作 mergemaster -p 才能完成。 - 至於何時需要或不需要,請參閱 UPDATING 內的說明。 - 一般來說,只要不是進行跨版號(major)的 &os; 版本升級, - 就可略過這步驟。 - + &prompt.root; svn update /usr/doc - 完成 installkernel 之後,需要重開機並切到 - single user 模式(舉例:也可以在 loader 提示符號後面加上 - boot -s)。 接下來執行: - - &prompt.root; mergemaster -p -&prompt.root; make installworld -&prompt.root; mergemaster -&prompt.root; reboot + 當最新的文件原始碼快照已經抓取到 + /usr/doc,一切都已就緒可以對已安裝的文件進行更新。 - - Read Further Explanations + 要完整更新所有語言,可以執行: - 上述步驟只是協助您升級的簡單說明而已,若要清楚瞭解每一步驟, - 尤其是若欲自行打造 kernel 設定,就更該閱讀下面的內容。 - - + &prompt.root; cd /usr/doc +&prompt.root; make install clean + + 如果只要更新一個特定的語言,可以在 /usr/doc +中特定語言的子目錄執行 make: + + &prompt.root; cd /usr/doc/en_US.ISO8859-1 +&prompt.root; make install clean + + 另一個更新文件的方法是在 /usr/doc +或特定語言的子目錄執行: + + &prompt.root; make update + + 輸出格式可以經由設定 FORMATS 來指: + + 
&prompt.root; cd /usr/doc +&prompt.root; make FORMATS='html html-split' install clean - - 閱讀 <filename>/usr/src/UPDATING</filename> + 有幾個選項可以使得只要更新部份文件或是建立特定翻譯的過程更加簡易。 +這些選項可以在 /etc/make.conf 中設定成整個系統的選項, +或是經由命令列傳送給 make - 在作任何事情之前,請務必先閱讀 - /usr/src/UPDATING (或在 source code 內類似的文件) - 。 這份文件會寫到可能遭遇的問題,或指定那些會執行的指令順序為何。 - 如果你機器現在的 UPDATING - 文件與這邊的描述有衝突、矛盾之處,那麼請以機器上的 - UPDATING 為準。 - - - 然而,如同先前所述,單單只靠閱讀 UPDATING - 並不能完全取代 mailing list。 這兩者都是互補的,而不相排斥。 - + 這些選項包括: + + + + DOC_LANG + + + 要建立或是安裝的語言和編碼清單,例如英文文件用 + en_US.ISO8859-1 + + + + + FORMATS + + + 單一格式或是要建立的輸出格式清單。目前支援 html, + html-split, txt, + ps, 和 pdf + + + + + DOCDIR + + + 安裝文件的位置。預設裝在 + /usr/share/doc + + + + + 更多關於&os;全系統的make 變數, + 請參考&man.make.conf.5;。 - - 檢查 <filename>/etc/make.conf</filename> + + + 從 Ports 更新文件 + + + + + Marc + Fonvieille + + Based on the work of + + + + - make.conf + Updating and Upgrading - 檢查 - /usr/share/examples/etc/make.conf - 以及 - /etc/make.conf。 第一份文件乃是一些系統預設值 - – 不過,大部分都被註解起來。 為了在重新編譯時能夠使用這些, - 請把這些設定加到 /etc/make.conf。 請注意在 - /etc/make.conf 的任何設定也會影響到每次使用 - make 的結果, - 因此設定一些適合自己系統的選項會是不錯的作法。 - - 一般使用者通常會從 - /usr/share/examples/etc/make.conf 複製 - CFLAGS 以及 NO_PROFILE - 之類的設定到 /etc/make.conf,並解除相關註解印記 - 。 + + documentation package + Updating and Upgrading + - 此外,也可以試試看其他設定 (COPTFLAGS、 - NOPORTDOCS 等等),是否符合自己所需。 - + 前一節說明了從原始碼更新 &os; 文件的方法。本節敘述使用 Ports Collection 的另一種方法: - - 更新 <filename>/etc</filename> 內的設定檔 + + + Install pre-built packages of the documentation, + without having to locally build anything or install the + documentation toolchain. + + + + Build the documentation sources through the ports + framework, making the checkout and build steps a bit + easier. + + + + This method of updating the &os; documentation is + supported by a set of documentation ports and packages which + are updated by the &a.doceng; on a monthly basis. These are + listed in the &os; Ports Collection, under the docs + category (http://www.freshports.org/docs/). + + Organization of the documentation ports is as + follows: + + + + The misc/freebsd-doc-en package or + port installs all of the English documentation. + + + + The misc/freebsd-doc-all + meta-package or port installs all documentation in all + available languages. + + + + There is a package and port for each translation, such + as misc/freebsd-doc-hu for the + Hungarian documentation. + + + + When binary packages are used, the &os; documentation will + be installed in all available formats for the given language. 
+ For example, the following command will install the latest + package of the Hungarian documentation: - /etc 目錄會有系統的相關設定檔, - 以及開機時的各項服務啟動 script。 有些 script 隨 FreeBSD - 版本的不同而有些差異。 - - 其中有些設定檔會在每日運作的系統裡也會用到。 尤其是 - /etc/group - - 有時候在 make installworld 安裝過程中, - 會需要先建立某些特定帳號或群組。 在進行升級之前,它們可能並不存在, - 因此升級時就會造成問題。 有時候 make buildworld - 會先檢查這些所需的帳號或群組是否已有存在。 - - 舉個這樣的例子,像是某次升級之後必須新增 smmsp - 帳號。 若使用者尚未新增該帳號就要完成升級操作的話, - 會在 &man.mtree.8; 嘗試建立 /var/spool/clientmqueue - 時發生失敗。 - - 解法是在 buildworld 階段之前,先執行 &man.mergemaster.8; 並搭配 - 選項。 它會比對那些執行 - buildworld 或 - installworld 所需之關鍵設定檔。 - 若你所用的是早期仍未支援 的 - mergemaster 版本,那麼直接使用 source tree - 內的新版即可: - - &prompt.root; cd /usr/src/usr.sbin/mergemaster -&prompt.root; ./mergemaster.sh -p - - - 若您是偏執狂(paranoid), - 可以像下面這樣去試著檢查系統上有哪些檔案屬於已改名或被刪除的群組 - : - - &prompt.root; find / -group GID -print - - 這會顯示所有符合要找的 GID 群組 - (可以是群組名稱,或者是群組的數字代號)的所有檔案。 - - + &prompt.root; pkg install hu-freebsd-doc - - 切換到 Single User 模式 - single-user mode + + Packages use a format that differs from the + corresponding port's name: + lang-freebsd-doc, + where lang is the short format of + the language code, such as hu for + Hungarian, or zh_cn for Simplified + Chinese. + - 您可能會想在 single user 模式下編譯系統。 - 除了可以明顯更快完成之外,安裝過程中將會牽涉許多重要的系統檔案, - 包括所有系統 binaries、libraries、include 檔案等。 - 若在運作中的系統(尤其有許多使用者在用的時候)內更改這些檔案, - 那簡直是自找麻煩的作法。 - - multi-user mode - 另一種模式是先在 multi-user 模式下編譯好系統,然後再切到 single user - 模式去安裝。 若您比較喜歡這種方式,只需在 build(編譯過程) 完成之後, - 再去執行下面的步驟即可。 一直到可切換 single user 模式時,再去執行 - installkernel 或 - installworld 即可。 - - 切換為 root 身份打: - - &prompt.root; shutdown now - - 這樣就會從原本的 multi-user 模式切換到 single user 模式。 - - 除此之外也可以重開機,接著在開機選單處選擇 - single user 選項。 如此一來就會進入 single user 模式, - 然後在 shell 提示符號處輸入: + To specify the format of the documentation, build the port + instead of installing the package. For example, to build and + install the English documentation: + + &prompt.root; cd /usr/ports/misc/freebsd-doc-en +&prompt.root; make install clean + + The port provides a configuration menu where the format to + build and install can be specified. By default, split + HTML, similar to the format used on http://www.FreeBSD.org, + and PDF are selected. + + Alternately, several make options can + be specified when building a documentation port, + including: + + + + WITH_HTML - &prompt.root; fsck -p -&prompt.root; mount -u / -&prompt.root; mount -a -t ufs -&prompt.root; swapon -a + + Builds the HTML format with a single HTML file per + document. The formatted documentation is saved to a + file called article.html, or + book.html. + + - 這樣會先檢查檔案系統,並重新將 / - 改以可讀寫的模式掛載,以及 /etc/fstab - 內所設定的其他 UFS 檔案系統,最後啟用 swap 磁區。 + + WITH_PDF + + The formatted documentation is saved to a file + called article.pdf or + book.pdf. + + - - 若 CMOS 時鐘是設為當地時間,而非 GMT 時區(若 &man.date.1; - 指令沒顯示正確的時間、時區),那可能需要再輸入下列指令: -&prompt.root; adjkerntz -i - - 這步驟可以確認您的當地時區設定是否正確 — - 否則日後會造成一些問題。 - + + DOCBASE - + + Specifies where to install the documentation. It + defaults to + /usr/local/share/doc/freebsd. + + + - - 移除 <filename>/usr/obj</filename> + This example uses variables to install the Hungarian + documentation as a PDF in the specified + directory: + + &prompt.root; cd /usr/ports/misc/freebsd-doc-hu +&prompt.root; make -DWITH_PDF DOCBASE=share/doc/freebsd/hu install clean + + Documentation packages or ports can be updated using the + instructions in . 
For example, the + following command updates the installed Hungarian + documentation using ports-mgmt/portmaster + by using packages only: - 在重新編譯系統的過程中,編譯結果會放到(預設情況) - /usr/obj 內。 這裡面的目錄會對應到 - /usr/src 的目錄結構。 - - 砍掉這目錄,可以讓以後的 make buildworld - 過程更快一些,而且可避免以前編譯的東西跟現在的混淆在一起的相依錯亂 - 。 - - 而有些 /usr/obj 內的檔案可能會設定不可更動的 - flag(細節請參閱 &man.chflags.1;),而必須先拿掉這些 flag 設定才行 - 。 - - &prompt.root; cd /usr/obj -&prompt.root; chflags -R noschg * -&prompt.root; rm -rf * + &prompt.root; portmaster -PP hu-freebsd-doc + + + + 追蹤發展分支 - - 重新編譯 Base System + -CURRENT + -STABLE - - 保留編譯的紀錄 + &os; 有兩個發展分支: &os.current; and + &os.stable;. - 建議養成好習慣,把執行 &man.make.1; 時產生的紀錄存起來。 - 這樣若有哪邊出錯,就會有錯誤訊息的紀錄。 雖然單單這樣, - 你可能不知道如何分析是哪邊出了岔,但若把你問題記錄貼到 &os; 相關的 - mailing list 就可以有人可以幫忙看是怎麼一回事情。 - - 最簡單的方是就是用 &man.script.1; 指令,並加上參數 - (你想存放記錄的檔案位置、檔名)即可。 - 這步驟應該在重新編譯系統時就要作,然後在完成編譯後輸入 - exit 即可離開。 - - &prompt.root; script /var/tmp/mw.out -Script started, output file is /var/tmp/mw.out -&prompt.root; make TARGET -… compile, compile, compile … -&prompt.root; exit -Script done, … - - 對了,還有一點儘量別把檔案存到 - /tmp 目錄內。 因為重開機之後, - 這目錄內的東西都會被清空。 比較妥善的地方是 - /var/tmp (如上例所示) 或者是 - root 的家目錄。 - + 本節將將解釋每個分支和他的預設的使用者,以及如何保持每個分支系統在最新。 + + + 使用 &os.current; + + &os.current; 是 &os; 開發的最前線,&os.current;的使用者應有較強的技術能力。 + 技術能力較弱的使用者想追蹤發展分支應該追蹤 &os.stable;。 + + &os.current; 是 &os; 最新的原始碼,包括正在進行的工作, + 實驗性的改變,以及可能會或可能不會在下一個官方發行版出現的過渡時期機制。 + 而許多 &os; 開發者每天編譯 + &os.current; 原始碼,可能有一段短暫的時間原始碼無法編譯成功。 + 這些問題會儘快被解決,但是無論 + &os.current; 帶來災難或是新功能,在同步原始碼時都要考量的問題。 + + &os.current; 適合下列三類人: + + + + 積極致力於某一部份原始碼樹的 &os; 社群成員。 + + + + 擔任積極測試者的 &os; 社群成員。 + 他們願意花時間解決問題,提出 &os; 改變的建議和的大方向,並發布修補。 + + + + 關注某些事物,或是想參考目前的原始碼,或是偶爾提供註解或貢獻原始碼的使用者。 + + + + &os.current; 不應該被認為是在下一版發行前 + 取得最新功能的快速途徑,因為尚未發行的功能並未被完整測試,很可能有bug。 + 他也不是一個取得bug修補的快速方式, + 因為任何已知bug的修補有可能產生新的bug。 + &os.current; 沒有任何 + 官方支援 + + + -CURRENT + using + - - 編譯 Base System + 追蹤 &os.current;: - 首先請先切換到 /usr/src 目錄: + + + 加入 &a.current.name; 和 + &a.svn-src-head.name; 郵件論壇。這是必須的 + 以查看人們關於系統目前狀態的評論並接收 &os.current; 目前狀態的重要公告。 + + &a.svn-src-head.name; 論壇紀錄每一次修改的紀錄, + 和可能產生的副作用的相關資訊 + + 前往 &a.mailman.lists.link;,點選論壇來訂閱,照網頁指示的步驟作, + 為了追蹤整個原始碼樹,不是只有 &os.current; 的改變,請訂閱 + &a.svn-src-all.name;論壇。 + + + + 同步 &os.current; 原始碼。 + 通常使用 svn 來檢查 + 列出的 Subversion 鏡像站 head 分支的-CURRENT 原始碼。 + + 網路很慢或是受到限制的使用者可以如 + 所描述的,使用 CTM 來替代,但是他並不如 svn + 一樣值得信賴, svn 是建議的同步原始碼的方法。 + + + + 由於程式碼庫的大小的, + 有些使用者選擇只同步部份他們有興趣或提供修補貢獻的部份原始碼。然而, + 計劃從原始碼編譯整個作業系統的使用者必須下載 全部 + 的&os.current;,而不是只有選擇的部份。 + + 編譯 &os.current; 前 + + -CURRENT + compiling + ,請非常仔細地閱讀 /usr/src/Makefile + 並遵從 的指示。 + 閱讀 &a.current; 和 /usr/src/UPDATING + 來掌握下一發行版的最新狀態。 + + + + 熱血!很鼓勵 &os.current; 使用者 + 發表他們對加強哪些功能或是修復哪些錯誤的建議。 + 如果您在建議時能附上相關程式碼的話, 那真是太棒了! 
+ + + + + + 使用 &os.stable; - &prompt.root; cd /usr/src + 主要發行版就是由 &os.stable; 這個開發分支而來。 + 修改進入這個分支的速度比較慢, + 並且假設這些修改已經先在 &os.current; 測試過。 + 這仍然是一個開發分支,而且在任何時候,&os.stable; 的原始碼都可能適合或不適合一般的使用。他就只是另一個開發分支,不是給終端使用者用的。 + 沒有要進行測試的使用者應該執行最新的 &os; 發行版。 + + 若有興趣去追蹤、貢獻 &os; 開發過程,尤其是會跟 + &os;接下來的發行版有關的人,應該考慮採用 &os.stable;。 + + 儘管 &os.stable; 應該在任何時候均能正確編譯、運作,但是沒有人能擔保一定如此。因為使用 &os.stable; 的人比 &os.current;多,無可避免地, + 有時候會在 &os.stable; 發現錯誤和極端的狀況,而這在 &os.current; 並非顯而易見。 + 由於上述這些理由,我們並不建議盲目追隨 &os.stable;。 + 特別重要的是 + 不要在沒有於開發或測試環境完整測試程式碼之前, + 升級任何上線的伺服器到 &os.stable;。 + + 追蹤 &os.stable;: + + + -STABLE + using + + + + 訂閱&a.stable.name;論壇來隨時瞭解 &os.stable; 編譯時的相依關係 + 或是其他特別需要注意的議題。 + 開發者在考慮一些有爭議的修正或更新時,就會先在這裡發信說明, 給使用者有機會可以反應, 看他們對所提的更改是否有什麼建議或問題。 + + 訂閱要追蹤分支的 svn相關論壇。 + 例如,追蹤9-STABLE分支的使用者應該訂閱&a.svn-src-stable-9.name;論壇。 + 這個論壇紀錄每一次修改的紀錄,和可能產生的副作用的相關資訊。 + + 前往&a.mailman.lists.link;,點選論壇來訂閱,照網頁指示的步驟作, + 為了追蹤整個原始碼樹,請訂閱&a.svn-src-all.name;論壇。 + + + + 要安裝一個新的 &os.stable; 系統, + 從 &os; 鏡像站安裝最新的 &os.stable; 發行版 + 或使用從 &os.stable;每個月的 snapshot built來安裝。更多關於快照的資訊, + 請參考www.freebsd.org/snapshots。 + + 要編譯或升級已經安裝的 &os; 系統至 &os.stable;, + 請使用svn + + Subversion + 來檢查要安裝分支的原始碼。 + 分支的名稱,例如 + stable/9,列在www.freebsd.org/releng。 + 如果沒有可靠的網際網路連線可以使用CTM () 。 + + + + + 在編譯或升級到 &os.stable; 之前 + + -STABLE + compiling + , 請仔細閱讀 /usr/src/Makefile + 並遵照的指示。閱讀 &a.stable; 和 + /usr/src/UPDATING下一發行版的最新狀態。 + + + + - (當然,除非你把 source code 放到其他地方,若真是這樣, - 就切換到那個目錄即可)。 - make + + 同步原始碼 - 使用 &man.make.1; 指令來重新編譯 world。 - 這指令會從 Makefile 檔(這檔會寫 &os; - 的程式該如何重新編譯、以哪些順序來編譯等等)去讀取相關指令。 + 有幾種不同的方法保持 &os; 原始碼在最新狀態。 + 這節比較這兩個主要的方法: SubversionCTM - 一般下指令的格式如下: + + 雖然有可能只更新部份原始碼樹,但是唯一支援的更新步驟是更新整個樹併重新編譯所有在使用者空間的程式, + 例如在/bin 和 + /sbin,以及核心原始碼。 + 只更新部份原始碼樹,只更新核心,或只更新使用者空間的程式時常會導致編譯錯誤,核心錯誤或是資料惡化等問題。 + - &prompt.root; make -x -DVARIABLE target + + Subversion + - 在這個例子, - 是你想傳給 &man.make.1; 的選項,細節說明請參閱 &man.make.1; 說明, - 裡面有相關範例說明。 + Subversion使用更新原始碼的 + pull 模型。使用者,或是 + cron 手稿語言, 呼叫更新本地端原始碼的 + svn 程式。 + Subversion is the + preferred method for updating local source trees as updates are + up-to-the-minute and the user controls when updates are + downloaded. It is easy to restrict updates to specific files or + directories and the requested updates are generated on the fly + by the server. 如何使用How to synchronize source using + Subversion同步原始碼,描述於 - - 則是把變數設定傳給 Makefile。 這些變數會控制 - Makefile 的行為。 這些設定與 - /etc/make.conf 的變數設定是一樣, - 只是另一種設定方式而已。 + + CTM + + CTM does not interactively + compare the local sources with those on the master archive or + otherwise pull them across. Instead, a script which identifies + changes in files since its previous run is executed several + times a day on the master CTM machine. Any detected changes are + compressed, stamped with a sequence-number, and encoded for + transmission over email in printable ASCII + only. Once downloaded, these deltas can + be run through ctm.rmail which will + automatically decode, verify, and apply the changes to the + user's copy of the sources. This process is more efficient than + Subversion and places less strain on + server resources since it is a push, rather + than a pull, model. Instructions for using + CTM to synchronize source can be + found at . + + If a user inadvertently wipes out portions of the local + archive, Subversion will detect and + rebuild the damaged portions. CTM + will not, and if a user deletes some portion of the source tree + and does not have a backup, they will have to start from scratch + from the most recent base delta and + rebuild it all with CTM. 
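As a rough sketch of the CTM approach described above, deltas that have already been downloaded can be applied to the local tree as shown below. The delta path is a placeholder, and the directory to run the command from must match the one in which the tree was originally seeded:

&prompt.root; cd /usr/src
&prompt.root; ctm -v -v /path/to/deltas/src-cur.*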
+ - &prompt.root; make -DNO_PROFILE target + + 重新編譯 World - 上面的例子則是另一種設定方式,也就是哪些不要。 - 這個例子中的意思是不去編譯 profiled libraries,效果就如同設定在 - /etc/make.conf + + Rebuilding world + + 一旦本地端的原始碼樹和 &os; 的特定版本同步後, + 例如 &os.stable; or &os.current;,原始碼樹就可以用來重新編譯系統。 + 這個過程叫重新編譯 world。 + + 重新編譯 world 之前 ,請確定執行以下任務: + + + 編譯 world <emphasis>之前</emphasis> 執行這些任務 + + + 備份所有重要資料到另一個系統或是可攜式媒體, + 檢查備份的完整性,手邊準備一個可以開機的安裝媒體。 + 再次強調在重新編譯系統 + 之前,製作系統的備份是非常重要的。 While + rebuilding world is an easy task, there will inevitably be + times when mistakes in the source tree render the system + unbootable. You will probably never have to use the backup, + but it is better to be safe than sorry! + + + + mailing list + Review the recent &a.stable.name; or &a.current.name; + entries, depending upon the branch being tracked. Be aware + of any known problems and which systems are affected. If a + known issue affects the version of synchronized code, wait + for an all clear announcement to be posted + stating that the problem has been solved. Resynchronize the + sources to ensure that the local version of source has the + needed fix. + + + + Read /usr/src/UPDATING for any + extra steps necessary for that version of the source. This + file contains important information about potential problems + and may specify the order to run certain commands. Many + upgrades require specific additional steps such as renaming + or deleting specific files prior to installing the new + world. These will be listed at the end of this file where + the currently recommended upgrade sequence is explicitly + spelled out. If UPDATING contradicts + any steps in this chapter, the instructions in + UPDATING take precedence and should be + followed. + + - NO_PROFILE= true # Avoid compiling profiled libraries + + 不要使用 <command>make world</command> - target 則是告訴 &man.make.1; - 該去做哪些。 每個 Makefile 都會定義不同的 - targets,然後依您所給的 target 就會決定會做哪些動作 - 。 + 某些比較老的文件建議使用 make + world。然而,這個指令掠略過某些重要的步驟,只適合專家使用。 + 大部分的情況,使用 make world都是錯誤的,應該用這裡描述的步驟代替。 + + - Some targets are listed in the - Makefile, but are not meant for you to run. - Instead, they are used by the build process to break out the - steps necessary to rebuild the system into a number of - sub-steps. + + 程序概要 - Most of the time you will not need to pass any parameters to - &man.make.1;, and so your command like will look like - this: + The build world process assumes an upgrade from an older + &os; version using the source of a newer version that was + obtained using the instructions in . + + In &os;, the term world includes the + kernel, core system binaries, libraries, programming files, + and built-in compiler. The order in which these components + are built and installed is important. + + For example, the old compiler might have a bug and not be + able to compile the new kernel. Since the new kernel should + be built with the new compiler, the new compiler must be + built, but not necessarily installed, before the new kernel is + built. + + The new world might rely on new kernel features, so the + new kernel must be installed before the new world is + installed. The old world might not run correctly on the new + kernel, so the new world must be installed immediately upon + installing the new kernel. + + Some configuration changes must be made before the new + world is installed, but others might break the old world. + Hence, two different configuration upgrade steps are used. + For the most part, the update process only replaces or adds + files and existing old files are not deleted. 
Since this can + cause problems, /usr/src/UPDATING will + indicate if any files need to be manually deleted and at which + step to do so. - &prompt.root; make target + These concerns have led to the recommended upgrade + sequence described in the following procedure. - Where target will be one of - many build options. The first target should always be - buildworld. + + It is a good idea to save the output from running + make to a file. If something goes wrong, + a copy of the error message can be posted to one of the &os; + mailing lists. + + The easiest way to do this is to use + script with a parameter that specifies + the name of the file to save all output to. Do not save the + output to /tmp as this directory may be + cleared at next reboot. A better place to save the file is + /var/tmp. Run this command immediately + before rebuilding the world, and then type + exit when the process has + finished: - As the names imply, buildworld - builds a complete new tree under /usr/obj, - and installworld, another target, installs this tree on - the current machine. + &prompt.root; script /var/tmp/mw.out +Script started, output file is /var/tmp/mw.out + - Having separate options is very useful for two reasons. First, it allows you - to do the build safe in the knowledge that no components of - your running system will be affected. The build is - self hosted. Because of this, you can safely - run buildworld on a machine running - in multi-user mode with no fear of ill-effects. It is still - recommended that you run the - installworld part in single user - mode, though. + + Overview of Build World Process - Secondly, it allows you to use NFS mounts to upgrade - multiple machines on your network. If you have three machines, - A, B and C that you want to upgrade, run make - buildworld and make installworld on - A. B and C should then NFS mount /usr/src - and /usr/obj from A, and you can then run - make installworld to install the results of - the build on B and C. + The commands used in the build world process should be + run in the order specified here. This section summarizes + the function of each command. + + + If the build world process has previously been run on + this system, a copy of the previous build may still exist + in /usr/obj. To + speed up the new build world process, and possibly save + some dependency headaches, remove this directory if it + already exists: + + &prompt.root; chflags -R noschg /usr/obj/* +&prompt.root; rm -rf /usr/obj + + + + Compile the new compiler and a few related tools, then + use the new compiler to compile the rest of the new world. + The result is saved to /usr/obj. + + &prompt.root; cd /usr/src +&prompt.root; make buildworld + + + + Use the new compiler residing in /usr/obj to build the new + kernel, in order to protect against compiler-kernel + mismatches. This is necessary, as certain memory + structures may have changed, and programs like + ps and top will fail + to work if the kernel and source code versions are not the + same. + + &prompt.root; make buildkernel + + + + Install the new kernel and kernel modules, making it + possible to boot with the newly updated kernel. If + kern.securelevel has been raised above + 1 and + noschg or similar flags have been set + on the kernel binary, drop the system into single-user + mode first. Otherwise, this command can be run from + multi-user mode without problems. See &man.init.8; for + details about kern.securelevel and + &man.chflags.1; for details about the various file + flags. 
+ + &prompt.root; make installkernel + + + + Drop the system into single-user mode in order to + minimize problems from updating any binaries that are + already running. It also minimizes any problems from + running the old world on a new kernel. - Although the world target still exists, - you are strongly encouraged not to use it. + &prompt.root; shutdown now - Run + Once in single-user mode, run these commands if the + system is formatted with UFS: - &prompt.root; make buildworld + &prompt.root; mount -u / +&prompt.root; mount -a -t ufs +&prompt.root; swapon -a - It is possible to specify a option to - make which will cause it to spawn several - simultaneous processes. This is most useful on multi-CPU machines. - However, since much of the compiling process is IO bound rather - than CPU bound it is also useful on single CPU machines. + If the system is instead formatted with ZFS, run these + two commands. This example assumes a zpool name of + zroot: + + &prompt.root; zfs set readonly=off zroot +&prompt.root; zfs mount -a + + + + Optional: If a keyboard mapping other than the default + US English is desired, it can be changed with + &man.kbdmap.1;: + + &prompt.root; kbdmap + + + + Then, for either file system, if the + CMOS clock is set to local time (this + is true if the output of &man.date.1; does not show the + correct time and zone), run: + + &prompt.root; adjkerntz -i + + + + Remaking the world will not update certain + directories, such as /etc, + /var and /usr, + with new or changed configuration files. The next step is + to perform some initial configuration file updates + to /etc in + preparation for the new world. The following command + compares only those files that are essential for the + success of installworld. For + instance, this step may add new groups, system accounts, + or startup scripts which have been added to &os; since the + last update. This is necessary so that the + installworld step will be able + to use any new system accounts, groups, and scripts. + Refer to for more detailed + instructions about this command: + + &prompt.root; mergemaster -p + + + + Install the new world and system binaries from + /usr/obj. - On a typical single-CPU machine you would run: + &prompt.root; cd /usr/src +&prompt.root; make installworld + - &prompt.root; make -j4 buildworld + + Update any remaining configuration files. - &man.make.1; will then have up to 4 processes running at any one - time. Empirical evidence posted to the mailing lists shows this - generally gives the best performance benefit. + &prompt.root; mergemaster -iF + - If you have a multi-CPU machine and you are using an SMP - configured kernel try values between 6 and 10 and see how they speed - things up. - + + Delete any obsolete files. This is important as they + may cause problems if left on the disk. + + &prompt.root; make delete-old + + + + A full reboot is now needed to load the new kernel and + new world with the new configuration files. + + &prompt.root; reboot + + + + Make sure that all installed ports have first been + rebuilt before old libraries are removed using the + instructions in . When + finished, remove any obsolete libraries to avoid conflicts + with newer ones. For a more detailed description of this + step, refer to . 
+ + &prompt.root; make delete-old-libs + + - - Timings - - rebuilding world - timings - + single-user mode - Many factors influence the build time, but fairly recent - machines may only take a one or two hours to build - the &os.stable; tree, with no tricks or shortcuts used during the - process. A &os.current; tree will take somewhat longer. - + If the system can have a window of down-time, consider + compiling the system in single-user mode instead of compiling + the system in multi-user mode, and then dropping into + single-user mode for the installation. Reinstalling the + system touches a lot of important system files, all the + standard system binaries, libraries, and include files. + Changing these on a running system, particularly one with + active users, is asking for trouble. - - Compile and Install a New Kernel + + Configuration Files + - kernel - compiling + make.conf - To take full advantage of your new system you should recompile the - kernel. This is practically a necessity, as certain memory structures - may have changed, and programs like &man.ps.1; and &man.top.1; will - fail to work until the kernel and source code versions are the - same. - - The simplest, safest way to do this is to build and install a - kernel based on GENERIC. While - GENERIC may not have all the necessary devices - for your system, it should contain everything necessary to boot your - system back to single user mode. This is a good test that the new - system works properly. After booting from - GENERIC and verifying that your system works you - can then build a new kernel based on your normal kernel configuration - file. - - On &os; it is important to build world before building a - new kernel. - - If you want to build a custom kernel, and already have a configuration - file, just use KERNCONF=MYKERNEL - like this: + This build world process uses several configuration + files. - &prompt.root; cd /usr/src -&prompt.root; make buildkernel KERNCONF=MYKERNEL -&prompt.root; make installkernel KERNCONF=MYKERNEL - + The Makefile located in + /usr/src describes how the programs that + comprise &os; should be built and the order in which they + should be built. + + The options available to make are + described in &man.make.conf.5; and some common examples are + included in + /usr/share/examples/etc/make.conf. Any + options which are added to /etc/make.conf + will control the how make runs and builds + programs. These options take effect every time + make is used, including compiling + applications from the Ports Collection, compiling custom C + programs, or building the &os; operating system. Changes to + some settings can have far-reaching and potentially surprising + effects. Read the comments in both locations and keep in mind + that the defaults have been chosen for a combination of + performance and safety. - Note that if you have raised kern.securelevel - above 1 and you have set either the - noschg or similar flags to your kernel binary, you - might find it necessary to drop into single user mode to use - installkernel. Otherwise you should be able - to run both these commands from multi user mode without - problems. See &man.init.8; for details about - kern.securelevel and &man.chflags.1; for details - about the various file flags. - - - - Reboot into Single User Mode - single-user mode + + src.conf + - You should reboot into single user mode to test the new kernel - works. Do this by following the instructions in - . 
+ How the operating system is built from source code is + controlled by /etc/src.conf. Unlike + /etc/make.conf, the contents of + /etc/src.conf only take effect when the + &os; operating system itself is being built. Descriptions of + the many options available for this file are shown in + &man.src.conf.5;. Be cautious about disabling seemingly + unneeded kernel modules and build options. Sometimes there + are unexpected or subtle interactions. - - Install the New System Binaries + + Variables and Targets - If you were building a version of &os; recent enough to have - used make buildworld then you should now use - installworld to install the new system - binaries. + The general format for using make is as + follows: - Run + &prompt.root; make -x -DVARIABLE target - &prompt.root; cd /usr/src -&prompt.root; make installworld + In this example, + is an option + passed to make. Refer to &man.make.1; for + examples of the available options. + + To pass a variable, specify the variable name with + . The + behavior of the Makefile is controlled by + variables. These can either be set in + /etc/make.conf or they can be specified + when using make. For example, this + variable specifies that profiled libraries should not be + built: + + &prompt.root; make -DNO_PROFILE target + + It corresponds with this setting in + /etc/make.conf: + + NO_PROFILE= true # Avoid compiling profiled libraries + + The target tells + make what to do and the + Makefile defines the available targets. + Some targets are used by the build process to break out the + steps necessary to rebuild the system into a number of + sub-steps. + + Having separate options is useful for two reasons. First, + it allows for a build that does not affect any components of a + running system. Because of this, + buildworld can be safely run on a + machine running in multi-user mode. It is still recommended + that installworld be run in part in + single-user mode, though. + + Secondly, it allows NFS mounts to be + used to upgrade multiple machines on a network, as described + in . + + It is possible to specify which will + cause make to spawn several simultaneous + processes. Since much of the compiling process is + I/O-bound rather than + CPU-bound, this is useful on both single + CPU and multi-CPU + machines. + + On a single-CPU machine, run the + following command to have up to 4 processes running at any one + time. Empirical evidence posted to the mailing lists shows + this generally gives the best performance benefit. + + &prompt.root; make -j4 buildworld + + On a multi-CPU machine, try values + between 6 and 10 to see + how they speed things up. + + + rebuilding world + timings + - If you specified variables on the make - buildworld command line, you must specify the same - variables in the make installworld command - line. This does not necessarily hold true for other options; - for example, must never be used with - installworld. + If any variables were specified to make + buildworld, specify the same variables to + make installworld. However, + must never be used + with installworld. - For example, if you ran: + For example, if this command was used: &prompt.root; make -DNO_PROFILE buildworld - you must install the results with: + Install the results with: &prompt.root; make -DNO_PROFILE installworld - otherwise it would try to install profiled libraries that - had not been built during the make buildworld - phase. 
+ Otherwise, the second command will try to install + profiled libraries that were not built during the + make buildworld phase. - - Update Files Not Updated by <command>make installworld</command> - - Remaking the world will not update certain directories (in - particular, /etc, /var and - /usr) with new or changed configuration files. - - The simplest way to update these files is to use - &man.mergemaster.8;, though it is possible to do it manually - if you would prefer to do that. Regardless of which way you - choose, be sure to make a backup of /etc in - case anything goes wrong. - - - <command>mergemaster</command> - - TomRhodesContributed by - - - - mergemaster - - The &man.mergemaster.8; utility is a Bourne script that will - aid you in determining the differences between your configuration files - in /etc, and the configuration files in - the source tree /usr/src/etc. This is - the recommended solution for keeping the system configuration files up to date - with those located in the source tree. - - To begin simply type mergemaster at your prompt, and - watch it start going. mergemaster will then build a - temporary root environment, from / down, and populate - it with various system configuration files. Those files are then compared - to the ones currently installed in your system. At this point, files that - differ will be shown in &man.diff.1; format, with the sign - representing added or modified lines, and representing - lines that will be either removed completely, or replaced with a new line. - See the &man.diff.1; manual page for more information about the &man.diff.1; - syntax and how file differences are shown. - - &man.mergemaster.8; will then show you each file that displays variances, - and at this point you will have the option of either deleting the new file (referred - to as the temporary file), installing the temporary file in its unmodified state, - merging the temporary file with the currently installed file, or viewing the - &man.diff.1; results again. - - Choosing to delete the temporary file will tell &man.mergemaster.8; that we - wish to keep our current file unchanged, and to delete the new version. - This option is not recommended, unless you see no - reason to change the current file. You can get help at any time by - typing ? at the &man.mergemaster.8; prompt. If the user - chooses to skip a file, it will be presented again after all other files - have been dealt with. - - Choosing to install the unmodified temporary file will replace the - current file with the new one. For most unmodified files, this is the best - option. - - Choosing to merge the file will present you with a text editor, - and the contents of both files. You can now merge them by - reviewing both files side by side on the screen, and choosing parts from - both to create a finished product. When the files are compared side by side, - the l key will select the left contents and the - r key will select contents from your right. - The final output will be a file consisting of both parts, which can then be - installed. This option is customarily used for files where settings have been - modified by the user. - - Choosing to view the &man.diff.1; results again will show you the file differences - just like &man.mergemaster.8; did before prompting you for an option. - - After &man.mergemaster.8; is done with the system files you will be - prompted for other options. 
&man.mergemaster.8; may ask if you want to rebuild - the password file and will finish up with an option to - remove left-over temporary files. - + + + Merging Configuration Files + + + + + Tom + Rhodes + + Contributed by + + + - - Manual Update - - If you wish to do the update manually, however, - you cannot just copy over the files from - /usr/src/etc to /etc and - have it work. Some of these files must be installed - first. This is because the /usr/src/etc - directory is not a copy of what your - /etc directory should look like. In addition, - there are files that should be in /etc that are - not in /usr/src/etc. - - If you are using &man.mergemaster.8; (as recommended), - you can skip forward to the next - section. - - The simplest way to do this by hand is to install the - files into a new directory, and then work through them looking - for differences. + + + mergemaster + + - - Backup Your Existing <filename>/etc</filename> + &os; provides the &man.mergemaster.8; Bourne script to aid + in determining the differences between the configuration files + in /etc, and the configuration files in + /usr/src/etc. This is the recommended + solution for keeping the system configuration files up to date + with those located in the source tree. + + Before using mergemaster, it is + recommended to first copy the existing + /etc somewhere safe. Include + which does a recursive copy and + which preserves times and the ownerships + on files: + + &prompt.root; cp -Rp /etc /etc.old + + When run, mergemaster builds a + temporary root environment, from / down, + and populates it with various system configuration files. + Those files are then compared to the ones currently installed + in the system. Files that differ will be shown in + &man.diff.1; format, with the sign + representing added or modified lines, and + representing lines that will be either removed completely or + replaced with a new file. Refer to &man.diff.1; for more + information about how file differences are shown. + + Next, mergemaster will display each + file that differs, and present options to: delete the new + file, referred to as the temporary file, install the temporary + file in its unmodified state, merge the temporary file with + the currently installed file, or view the results + again. + + Choosing to delete the temporary file will tell + mergemaster to keep the current file + unchanged and to delete the new version. This option is not + recommended. To get help at any time, type + ? at the mergemaster + prompt. If the user chooses to skip a file, it will be + presented again after all other files have been dealt + with. + + Choosing to install the unmodified temporary file will + replace the current file with the new one. For most + unmodified files, this is the best option. + + Choosing to merge the file will present a text editor, and + the contents of both files. The files can be merged by + reviewing both files side by side on the screen, and choosing + parts from both to create a finished product. When the files + are compared side by side, l selects the left + contents and r selects contents from the + right. The final output will be a file consisting of both + parts, which can then be installed. This option is + customarily used for files where settings have been modified + by the user. + + Choosing to view the results again will redisplay the file + differences. + + After mergemaster is done with the + system files, it will prompt for other options. 
It may prompt + to rebuild the password file and will finish up with an option + to remove left-over temporary files. + + - does a recursive copy, - preserves times, ownerships on files and suchlike. - + + + Deleting Obsolete Files and Libraries - You need to build a dummy set of directories to install the new - /etc and other files into. - /var/tmp/root is a reasonable choice, and - there are a number of subdirectories required under this as - well. + + + + Anton + Shterenlikht + + Based on notes provided by + + + - &prompt.root; mkdir /var/tmp/root -&prompt.root; cd /usr/src/etc -&prompt.root; make DESTDIR=/var/tmp/root distrib-dirs distribution + + Deleting obsolete files and directories + - This will build the necessary directory structure and install the - files. A lot of the subdirectories that have been created under - /var/tmp/root are empty and should be deleted. - The simplest way to do this is to: - - &prompt.root; cd /var/tmp/root -&prompt.root; find -d . -type d | xargs rmdir 2>/dev/null - - This will remove all empty directories. (Standard error is - redirected to /dev/null to prevent the warnings - about the directories that are not empty.) - - /var/tmp/root now contains all the files that - should be placed in appropriate locations below - /. You now have to go through each of these - files, determining how they differ with your existing files. - - Note that some of the files that will have been installed in - /var/tmp/root have a leading .. At the - time of writing the only files like this are shell startup files in - /var/tmp/root/ and - /var/tmp/root/root/, although there may be others - (depending on when you are reading this). Make sure you use - ls -a to catch them. - - The simplest way to do this is to use &man.diff.1; to compare the - two files: - - &prompt.root; diff /etc/shells /var/tmp/root/etc/shells - - This will show you the differences between your - /etc/shells file and the new - /var/tmp/root/etc/shells file. Use these to decide whether to - merge in changes that you have made or whether to copy over your old - file. - - - Name the New Root Directory - (<filename>/var/tmp/root</filename>) with a Time Stamp, so You Can - Easily Compare Differences Between Versions - - Frequently rebuilding the world means that you have to update - /etc frequently as well, which can be a bit of - a chore. - - You can speed this process up by keeping a copy of the last set - of changed files that you merged into /etc. - The following procedure gives one idea of how to do this. - - - - Make the world as normal. When you want to update - /etc and the other directories, give the - target directory a name based on the current date. If you were - doing this on the 14th of February 1998 you could do the - following: + As a part of the &os; development lifecycle, files and + their contents occasionally become obsolete. This may be + because functionality is implemented elsewhere, the version + number of the library has changed, or it was removed from the + system entirely. These obsoleted files, libraries, and + directories should be removed when updating the system. + This ensures that the system is not cluttered with old files + which take up unnecessary space on the storage and backup + media. Additionally, if the old library has a security or + stability issue, the system should be updated to the newer + library to keep it safe and to prevent crashes caused by the + old library. 
Files, directories, and libraries which are + considered obsolete are listed in + /usr/src/ObsoleteFiles.inc. The + following instructions should be used to remove obsolete files + during the system upgrade process. + + After the make installworld and the + subsequent mergemaster have finished + successfully, check for obsolete files and libraries: - &prompt.root; mkdir /var/tmp/root-19980214 -&prompt.root; cd /usr/src/etc -&prompt.root; make DESTDIR=/var/tmp/root-19980214 \ - distrib-dirs distribution - + &prompt.root; cd /usr/src +&prompt.root; make check-old - - Merge in the changes from this directory as outlined - above. - - Do not remove the - /var/tmp/root-19980214 directory when you - have finished. - - - - When you have downloaded the latest version of the source - and remade it, follow step 1. This will give you a new - directory, which might be called - /var/tmp/root-19980221 (if you wait a week - between doing updates). - - - - You can now see the differences that have been made in the - intervening week using &man.diff.1; to create a recursive diff - between the two directories: - - &prompt.root; cd /var/tmp -&prompt.root; diff -r root-19980214 root-19980221 - - Typically, this will be a much smaller set of differences - than those between - /var/tmp/root-19980221/etc and - /etc. Because the set of differences is - smaller, it is easier to migrate those changes across into your - /etc directory. - - - - You can now remove the older of the two - /var/tmp/root-* directories: - - &prompt.root; rm -rf /var/tmp/root-19980214 - - - - Repeat this process every time you need to merge in changes - to /etc. - - + If any obsolete files are found, they can be deleted using + the following command: - You can use &man.date.1; to automate the generation of the - directory names: + &prompt.root; make delete-old - &prompt.root; mkdir /var/tmp/root-`date "+%Y%m%d"` - - - + A prompt is displayed before deleting each obsolete file. + To skip the prompt and let the system remove these files + automatically, use + BATCH_DELETE_OLD_FILES: - - Rebooting + &prompt.root; make -DBATCH_DELETE_OLD_FILES delete-old - You are now done. After you have verified that everything appears - to be in the right place you can reboot the system. A simple - &man.shutdown.8; should do it: + The same goal can be achieved by piping these commands + through yes: - &prompt.root; shutdown -r now - + &prompt.root; yes|make delete-old - - Finished + + Warning - You should now have successfully upgraded your &os; system. - Congratulations. + Deleting obsolete files will break applications that + still depend on those obsolete files. This is especially + true for old libraries. In most cases, the programs, ports, + or libraries that used the old library need to be recompiled + before make delete-old-libs is + executed. + - If things went slightly wrong, it is easy to rebuild a particular - piece of the system. For example, if you accidentally deleted - /etc/magic as part of the upgrade or merge of - /etc, the &man.file.1; command will stop working. - In this case, the fix would be to run: + Utilities for checking shared library dependencies include + sysutils/libchk and + sysutils/bsdadminscripts. 
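For example, sysutils/libchk can be installed as a package and run to report binaries that reference missing or stale shared libraries. This is only a sketch; the exact report format depends on the installed version:

&prompt.root; pkg install libchk
&prompt.root; libchk | less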
+ + Obsolete shared libraries can conflict with newer + libraries, causing messages like these: + + /usr/bin/ld: warning: libz.so.4, needed by /usr/local/lib/libtiff.so, may conflict with libz.so.5 +/usr/bin/ld: warning: librpcsvc.so.4, needed by /usr/local/lib/libXext.so, may conflict with librpcsvc.so.5 + + To solve these problems, determine which port installed + the library: + + &prompt.root; pkg which /usr/local/lib/libtiff.so + /usr/local/lib/libtiff.so was installed by package tiff-3.9.4 +&prompt.root; pkg which /usr/local/lib/libXext.so + /usr/local/lib/libXext.so was installed by package libXext-1.1.1,1 + + Then deinstall, rebuild, and reinstall the port. To + automate this process, + ports-mgmt/portmaster can be used. After + all ports are rebuilt and no longer use the old libraries, + delete the old libraries using the following command: + + &prompt.root; make delete-old-libs + + If something goes wrong, it is easy to rebuild a + particular piece of the system. For example, if + /etc/magic was accidentally deleted as + part of the upgrade or merge of /etc, + file will stop working. To fix this, + run: - &prompt.root; cd /usr/src/usr.bin/file + &prompt.root; cd /usr/src/usr.bin/file &prompt.root; make all install - - Questions + + Common Questions - - - - Do I need to re-make the world for every change? - - - - There is no easy answer to this one, as it depends on the - nature of the change. For example, if you just ran CVSup, and - it has shown the following files as being updated: + + + Do I need to re-make the world for every + change? + + + It depends upon the nature of the change. For + example, if svn only shows + the following files as being updated: src/games/cribbage/instr.c src/games/sail/pl_main.c @@ -1180,356 +1805,272 @@ src/release/sysinstall/media.c src/share/mk/bsd.port.mk - it probably is not worth rebuilding the entire world. - You could just go to the appropriate sub-directories and - make all install, and that's about it. But - if something major changed, for example - src/lib/libc/stdlib then you should either - re-make the world, or at least those parts of it that are - statically linked (as well as anything else you might have added - that is statically linked). - - At the end of the day, it is your call. You might be happy - re-making the world every fortnight say, and let changes - accumulate over that fortnight. Or you might want to re-make - just those things that have changed, and be confident you can - spot all the dependencies. - - And, of course, this all depends on how often you want to - upgrade, and whether you are tracking &os.stable; or - &os.current;. - - - - - - My compile failed with lots of signal 11 - signal 11 (or other signal - number) errors. What has happened? - - - - This is normally indicative of hardware problems. - (Re)making the world is an effective way to stress test your - hardware, and will frequently throw up memory problems. These - normally manifest themselves as the compiler mysteriously dying - on receipt of strange signals. - - A sure indicator of this is if you can restart the make and - it dies at a different point in the process. - - In this instance there is little you can do except start - swapping around the components in your machine to determine - which one is failing. - - - - - - Can I remove /usr/obj when I have - finished? - - - - The short answer is yes. - - /usr/obj contains all the object files - that were produced during the compilation phase. 
Normally, one - of the first steps in the make buildworld process is to - remove this directory and start afresh. In this case, keeping - /usr/obj around after you have finished - makes little sense, and will free up a large chunk of disk space - (currently about 340 MB). - - However, if you know what you are doing you can have - make buildworld skip this step. This will make subsequent - builds run much faster, since most of sources will not need to - be recompiled. The flip side of this is that subtle dependency - problems can creep in, causing your build to fail in odd ways. - This frequently generates noise on the &os; mailing lists, - when one person complains that their build has failed, not - realizing that it is because they have tried to cut - corners. - - - - - - Can interrupted builds be resumed? - - - - This depends on how far through the process you got before - you found a problem. - - In general (and this is not a hard and - fast rule) the make buildworld process builds new - copies of essential tools (such as &man.gcc.1;, and - &man.make.1;) and the system libraries. These tools and - libraries are then installed. The new tools and libraries are - then used to rebuild themselves, and are installed again. The - entire system (now including regular user programs, such as - &man.ls.1; or &man.grep.1;) is then rebuilt with the new - system files. - - If you are at the last stage, and you know it (because you - have looked through the output that you were storing) then you - can (fairly safely) do: + it probably is not worth rebuilding the entire + world. Instead, go into the appropriate sub-directories + and run make all install. But if + something major changes, such as + src/lib/libc/stdlib, consider + rebuilding world. + + Some users rebuild world every fortnight and let + changes accumulate over that fortnight. Others only + re-make those things that have changed and are careful + to spot all the dependencies. It all depends on how + often a user wants to upgrade and whether they are + tracking &os.stable; or &os.current;. + + - … fix the problem … -&prompt.root; cd /usr/src -&prompt.root; make -DNO_CLEAN all + + What would cause a compile to fail with lots of + signal 11 + signal 11 + + (or other signal number) errors? - This will not undo the work of the previous - make buildworld. + + This normally indicates a hardware problem. + Building world is an effective way to stress test + hardware, especially memory. A sure indicator of a + hardware issue is when make + is restarted and it dies at a different point in the + process. + + To resolve this error, swap out the components in + the machine, starting with RAM, to determine which + component is failing. + + + + + Can /usr/obj + be removed when finished? - If you see the message: + + This directory contains all the object files that + were produced during the compilation phase. Normally, + one of the first steps in the make + buildworld process is to remove this + directory and start afresh. Keeping + /usr/obj around when finished makes + little sense, and its removal frees up a approximately + 2GB of disk space. + + + + + Can interrupted builds be resumed? + + + This depends on how far into the process the + problem occurs. In general, make + buildworld builds new copies of essential + tools and the system libraries. These tools and + libraries are then installed, used to rebuild + themselves, and are installed again. The rest of the + system is then rebuilt with the new system + tools. 
+ + During the last stage, it is fairly safe to run + these commands as they will not undo the work of the + previous make buildworld: - -------------------------------------------------------------- + &prompt.root; cd /usr/src +&prompt.root; make -DNO_CLEAN all + + If this message appears: + + -------------------------------------------------------------- Building everything.. -------------------------------------------------------------- - in the make buildworld output then it is - probably fairly safe to do so. + in the make buildworld output, + it is probably fairly safe to do so. - If you do not see that message, or you are not sure, then it - is always better to be safe than sorry, and restart the build + If that message is not displayed, it is always + better to be safe than sorry and to restart the build from scratch. - - + + - - - How can I speed up making the world? - - - - - - Run in single user mode. - - - - Put the /usr/src and - /usr/obj directories on separate - file systems held on separate disks. If possible, put these - disks on separate disk controllers. - - - - Better still, put these file systems across multiple - disks using the &man.ccd.4; (concatenated disk - driver) device. - - - - Turn off profiling (set NO_PROFILE=true in - /etc/make.conf). You almost certainly - do not need it. - - - - Also in /etc/make.conf, set - CFLAGS to something like . The optimization is much - slower, and the optimization difference between - and is normally - negligible. lets the compiler use - pipes rather than temporary files for communication, which - saves disk access (at the expense of memory). - - - - Pass the option to &man.make.1; to - run multiple processes in parallel. This usually helps - regardless of whether you have a single or a multi processor - machine. - - - The file system holding - /usr/src can be mounted (or remounted) - with the option. This prevents the - file system from recording the file access time. You probably - do not need this information anyway. - - &prompt.root; mount -u -o noatime /usr/src - - - The example assumes /usr/src is - on its own file system. If it is not (if it is a part of - /usr for example) then you will - need to use that file system mount point, and not - /usr/src. - - - - - The file system holding /usr/obj can - be mounted (or remounted) with the - option. This causes disk writes to happen asynchronously. - In other words, the write completes immediately, and the - data is written to the disk a few seconds later. This - allows writes to be clustered together, and can be a - dramatic performance boost. - - - Keep in mind that this option makes your file system - more fragile. With this option there is an increased - chance that, should power fail, the file system will be in - an unrecoverable state when the machine restarts. - - If /usr/obj is the only thing on - this file system then it is not a problem. If you have - other, valuable data on the same file system then ensure - your backups are fresh before you enable this - option. - - - &prompt.root; mount -u -o async /usr/obj - - - As above, if /usr/obj is not on - its own file system, replace it in the example with the - name of the appropriate mount point. - - - - - - - - - What do I do if something goes wrong? - - - - Make absolutely sure your environment has no - extraneous cruft from earlier builds. This is simple - enough. + + Is it possible to speed up making the world? - &prompt.root; chflags -R noschg /usr/obj/usr + + Several actions can speed up the build world + process. 
For example, the entire process can be run + from single-user mode. However, this will prevent users + from having access to the system until the process is + complete. + + Careful file system design or the use of ZFS + datasets can make a difference. Consider putting + /usr/src and + /usr/obj on + separate file systems. If possible, place the file + systems on separate disks on separate disk controllers. + When mounting /usr/src, use + which prevents the file system + from recording the file access time. If /usr/src is not on its + own file system, consider remounting /usr with + . + + The file system holding /usr/obj can be mounted + or remounted with so that disk + writes happen asynchronously. The write completes + immediately, and the data is written to the disk a few + seconds later. This allows writes to be clustered + together, and can provide a dramatic performance + boost. + + + Keep in mind that this option makes the file + system more fragile. With this option, there is an + increased chance that, should power fail, the file + system will be in an unrecoverable state when the + machine restarts. + + If /usr/obj is the only + directory on this file system, this is not a problem. + If you have other, valuable data on the same file + system, ensure that there are verified backups before + enabling this option. + + + Turn off profiling by setting + NO_PROFILE=true in + /etc/make.conf. + + Pass + to &man.make.1; to run multiple processes in parallel. + This usually helps on both single- and multi-processor + machines. + + + + + What if something goes wrong? + + + First, make absolutely sure that the environment has + no extraneous cruft from earlier builds: + + &prompt.root; chflags -R noschg /usr/obj/usr &prompt.root; rm -rf /usr/obj/usr &prompt.root; cd /usr/src &prompt.root; make cleandir &prompt.root; make cleandir - Yes, make cleandir really should - be run twice. + Yes, make cleandir really should + be run twice. - Then restart the whole process, starting - with make buildworld. + Then, restart the whole process, starting with + make buildworld. - If you still have problems, send the error and the - output of uname -a to &a.questions;. - Be prepared to answer other questions about your - setup! - - - + If problems persist, send the error and the output + of uname -a to &a.questions;. Be + prepared to answer other questions about the + setup! + + + - Tracking for Multiple Machines + + 追蹤多追蹤多部機器 + - MikeMeyerContributed by + + + Mike + Meyer + + Contributed by + - + NFS - installing multiple machines + 安裝多部機器 - If you have multiple machines that you want to track the - same source tree, then having all of them download sources and - rebuild everything seems like a waste of resources: disk space, - network bandwidth, and CPU cycles. It is, and the solution is - to have one machine do most of the work, while the rest of the - machines mount that work via NFS. This section outlines a - method of doing so. - - - Preliminaries - - First, identify a set of machines that is going to run - the same set of binaries, which we will call a - build set. Each machine can have a - custom kernel, but they will be running the same userland - binaries. From that set, choose a machine to be the - build machine. It is going to be the - machine that the world and kernel are built on. Ideally, it - should be a fast machine that has sufficient spare CPU to - run make buildworld and - make buildkernel. 
You will also want to - choose a machine to be the test - machine, which will test software updates before they - are put into production. This must be a - machine that you can afford to have down for an extended - period of time. It can be the build machine, but need not be. - - All the machines in this build set need to mount - /usr/obj and - /usr/src from the same machine, and at - the same point. Ideally, those are on two different drives - on the build machine, but they can be NFS mounted on that machine - as well. If you have multiple build sets, - /usr/src should be on one build machine, and - NFS mounted on the rest. - - Finally make sure that - /etc/make.conf on all the machines in - the build set agrees with the build machine. That means that - the build machine must build all the parts of the base - system that any machine in the build set is going to - install. Also, each build machine should have its kernel - name set with KERNCONF in - /etc/make.conf, and the build machine - should list them all in KERNCONF, listing - its own kernel first. The build machine must have the kernel - configuration files for each machine in - /usr/src/sys/arch/conf - if it is going to build their kernels. - - - - The Base System - - Now that all that is done, you are ready to build - everything. Build the kernel and world as described in on the build machine, - but do not install anything. After the build has finished, go - to the test machine, and install the kernel you just - built. If this machine mounts /usr/src - and /usr/obj via NFS, when you reboot - to single user you will need to enable the network and mount - them. The easiest way to do this is to boot to multi-user, - then run shutdown now to go to single user - mode. Once there, you can install the new kernel and world and run - mergemaster just as you normally would. When - done, reboot to return to normal multi-user operations for this - machine. - - After you are certain that everything on the test - machine is working properly, use the same procedure to - install the new software on each of the other machines in - the build set. - - - - Ports - - The same ideas can be used for the ports tree. The first - critical step is mounting /usr/ports from - the same machine to all the machines in the build set. You can - then set up /etc/make.conf properly to share - distfiles. You should set DISTDIR to a - common shared directory that is writable by whichever user - root is mapped to by your NFS mounts. Each - machine should set WRKDIRPREFIX to a - local build directory. Finally, if you are going to be - building and distributing packages, you should set - PACKAGES to a directory similar to - DISTDIR. - + 當有多部機器需要追蹤同一個原始碼樹, + 會浪費磁碟空間,網路頻寬,和 + 中央處理器周期來讓每個系統下載原始碼和編譯所有東西。 + 解決方法是有解決方法是有一台機器做大部份的工作,剩下的機器經由 + NFS來掛載這個工作。這一節概述了一個如此做的方法。 + 更多關於使用 NFS的資訊,請參考 + + First, identify a set of machines which will run the same + set of binaries, known as a build set. + Each machine can have a custom kernel, but will run the same + userland binaries. From that set, choose a machine to be the + build machine that the world and kernel + are built on. Ideally, this is a fast machine that has + sufficient spare CPU to run make + buildworld and make + buildkernel. + + Select a machine to be the test + machine, which will test software updates before + they are put into production. This must be + a machine that can afford to be down for an extended period of + time. It can be the build machine, but need not be. 
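The following /etc/fstab lines are a rough sketch of the NFS sharing this section describes, as seen from one client machine. The hostname buildmachine and the mount options are placeholders, not part of the original text, and may need adjusting for a particular site:

# Source and object trees exported by the build machine
buildmachine:/usr/src   /usr/src   nfs   ro   0   0
buildmachine:/usr/obj   /usr/obj   nfs   ro   0   0

Read-only mounts are usually sufficient on the clients, since they only install what the build machine has already compiled.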
+ + All the machines in this build set need to mount + /usr/obj and /usr/src + from the build machine via NFS. For multiple + build sets, /usr/src should be on one build + machine, and NFS mounted on the rest. + + Ensure that /etc/make.conf and + /etc/src.conf on all the machines in the + build set agree with the build machine. That means that the + build machine must build all the parts of the base system that + any machine in the build set is going to install. Also, each + build machine should have its kernel name set with + KERNCONF in + /etc/make.conf, and the build machine + should list them all in its KERNCONF, + listing its own kernel first. The build machine must have the + kernel configuration files for each machine in its /usr/src/sys/arch/conf. + + On the build machine, build the kernel and world as + described in , but do not install + anything on the build machine. Instead, install the built + kernel on the test machine. On the test machine, mount + /usr/src and + /usr/obj via NFS. Then, + run shutdown now to go to single-user mode in + order to install the new kernel and world and run + mergemaster as usual. When done, reboot to + return to normal multi-user operations. + + After verifying that everything on the test machine is + working properly, use the same procedure to install the new + software on each of the other machines in the build set. + + The same methodology can be used for the ports tree. The + first step is to share /usr/ports via + NFS to all the machines in the build set. To + configure /etc/make.conf to share + distfiles, set DISTDIR to a common shared + directory that is writable by whichever user root is mapped to by the + NFS mount. Each machine should set + WRKDIRPREFIX to a local build directory, if + ports are to be built locally. Alternately, if the build system + is to build and distribute packages to the machines in the build + set, set PACKAGES on the build system to a + directory similar to DISTDIR. 
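As a minimal sketch of those ports-related settings, /etc/make.conf on each machine in the build set might contain entries like the following. All paths shown here are examples and should be adapted to the local layout:

# Shared over NFS and writable by the user that root maps to
DISTDIR=	/usr/ports/distfiles
# Local directory so builds on different machines do not collide
WRKDIRPREFIX=	/var/tmp/ports
# On the build system only: where built packages are collected
PACKAGES=	/usr/ports/packages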
Index: zh_TW.UTF-8/books/handbook/disks/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/disks/chapter.xml +++ zh_TW.UTF-8/books/handbook/disks/chapter.xml @@ -1,17 +1,20 @@ - + + 儲存設備篇 概述 - 本章涵蓋如何在 FreeBSD 下使用碟片裝置 譯註:雖然有些設備沒有『碟片』,例如 USB 隨身碟, @@ -343,327 +346,152 @@ - - RAID - - - 軟體 RAID - - - 連接式磁碟裝置驅動程式(CCD, Concatenated Disk Driver) 設定 - - ChristopherShumwayOriginal work by - - - JimBrownRevised by - - - - - -RAIDsoftware - - RAIDCCD - - - 對大容量儲存設備而言,最關鍵的要素乃是速度、可靠性及價格。 - 然而這三者往往難以兼顧:快速可靠的設備通常很貴; - 而降低成本通常也犧牲了速度或可靠性。 - - 接下來要介紹的系統,價格是最重要的考量,接下來是速度, - 最後才是可靠性。 順序如此是因為資料傳輸的速度最終取決於網路, - 而儘管可靠性十分重要,卻有簡單的取代方案: - 將資料完整備份於 CD-R 中。 - - 選擇大容量儲存設備方案時,首先要定義您的需求。 - 如果您重視速度或可靠性甚於價格,接下來的介紹恐非您所需。 - - - 安裝硬體 - - 除了系統磁碟外,下面介紹的 CCD 磁碟陣列將使用到三顆 30GB、 - 5400 RPM 的 Western Digital IDE 磁碟,以提供約 90GB 的儲存空間。 - 最理想的情況是每個磁碟由獨立使用的排線連接獨立使用的 IDE 控制器, - 不過為了降低成本,利用 jumper 設定磁碟,使每個 IDE 控制器可連接 - 一個主磁碟加一個副磁碟,如此可不必加裝額外的 IDE 控制器。 - - 開機後,BIOS 應該設定成自重偵測磁碟。更重要的是 FreeBSD 應該 - 要偵測到它們: - - ad0: 19574MB <WDC WD205BA> [39770/16/63] at ata0-master UDMA33 -ad1: 29333MB <WDC WD307AA> [59598/16/63] at ata0-slave UDMA33 -ad2: 29333MB <WDC WD307AA> [59598/16/63] at ata1-master UDMA33 -ad3: 29333MB <WDC WD307AA> [59598/16/63] at ata1-slave UDMA33 - - - 如果 FreeBSD 沒有偵測到所有磁碟,請確認 jumper 都設定正確。 - 許多 IDE 磁碟可以設定成 Cable Select - (根據排線位置決定),這並非 master(主磁碟) - 或 slave(副磁碟)。 請參閱磁碟的說明文件以正確設定 jumper - 。 - - 接下來,考慮如何將它們變成檔案系統的一部份。您可以參考 - &man.vinum.8;() 及 &man.ccd.4;。 - 在此我們選擇 &man.ccd.4;。 - - - - 設定 CCD - - &man.ccd.4; 可以將多個磁碟接起來成為一個大磁碟。要使用 - &man.ccd.4;,您的 kernel 需要支援 &man.ccd.4;。將這行加入到 - kernel 設定檔,並重編、重安裝 kernel: - - device ccd - - 也可以載入 kernel 動態模組來支援 &man.ccd.4;。 - - 使用 &man.ccd.4; 請先用 &man.bsdlabel.8; 來初始磁碟: - - bsdlabel -r -w ad1 auto -bsdlabel -r -w ad2 auto -bsdlabel -r -w ad3 auto - - 上述指令會建立 ad1c, - ad2cad3c, - 這些 bsdlabel 都使用了整個磁碟。 - - 下一步是修改 label type,同樣用 &man.bsdlabel.8; 來處理: - - bsdlabel -e ad1 -bsdlabel -e ad2 -bsdlabel -e ad3 - - 這個指令會打開一個編輯器(預設是 &man.vi.1;,可以用 - EDITOR 環境變數來指定其它編輯器),並將目前磁碟的 label - 資訊顯示在該編輯器裡。 - - 一個還未變動過的磁碟 label 資訊看起來會像這樣: - - 8 partitions: -# size offset fstype [fsize bsize bps/cpg] - c: 60074784 0 unused 0 0 0 # (Cyl. 0 - 59597) - - 在此我們要新增一個 e partition 給 - &man.ccd.4; 使用。 通常複製 c partition 那一行, - 再把 那一行改成 - 4.2BSD 就可以了。 - 改完之後看起來應該會像這樣: - - 8 partitions: -# size offset fstype [fsize bsize bps/cpg] - c: 60074784 0 unused 0 0 0 # (Cyl. 0 - 59597) - e: 60074784 0 4.2BSD 0 0 0 # (Cyl. 
0 - 59597) - - - - - 建立檔案系統 - - 現在所有的磁碟都已經建好 bsdlabel 了,可以開始建立 &man.ccd.4;。 - 用 &man.ccdconfig.8; 來建立 &man.ccd.4;,參考下面的指令: - - ccdconfig ccd0 32 0 /dev/ad1e /dev/ad2e /dev/ad3e - - 每個參數的作用如下: - - - - 第一個參數是要設定的裝置名稱,在這個例子裡是 - /dev/ccd0c。其中 /dev/ - 可有可無。 - - - - - 「interleave」的大小。所謂 interleave 是指一排磁碟區塊 - (disk block)的大小,通常以 512 bytes 為單位,所以 interleave - 設為 32 即為 16,384 bytes。 - - - - &man.ccdconfig.8; 設定模式的參數。如果您打算啟用磁碟鏡設 - (drive mirroring),您可以在此指定參數。這個例子沒有使用鏡設, - 所以設成 0。 - - - - &man.ccdconfig.8; 最後的參數是要加入到陣列的所有磁碟。 - 請使用完整的路徑。 - - - - - 執行 &man.ccdconfig.8; 之後,&man.ccd.4; - 已設定完成可供建立檔案系統。 請參考 &man.newfs.8; 或輸入: - - newfs /dev/ccd0c - - - - - - 讓一切自動完成 - - 通常您會希望每次開機時都能自動掛上(mount) &man.ccd.4;。 - 用下面的指令將您目前的設定寫入 /etc/ccd.conf - : - - ccdconfig -g > /etc/ccd.conf - - 如果 /etc/ccd.conf 存在,每次開機時 - /etc/rc 都會執行 ccdconfig -C - 。 如此便可自動設定 &man.ccd.4; 以便之後掛上(mount)檔案系統。 - - - 如果您開機時選擇進入單人模式(single mode),在掛上 - (&man.mount.8;) &man.ccd.4; 的檔案系統之前您得先執行設定的指令: - - - ccdconfig -C - - - 要在每次開機時自動掛上(mount) &man.ccd.4;,請在 - /etc/fstab 加入 &man.ccd.4;: - - - /dev/ccd0c /media ufs rw 2 2 - - - - - Vinum 容量管理系統 - -RAIDsoftware - - RAID - Vinum - - - Vinum 容量管理系統(以下簡稱 Vinum) 可視為一種虛擬磁碟。 - 它將區塊裝置(block device) 的介面與對應資料的方式切割開來,比起原本 - slice 劃分的磁碟,Vinum 可增加了彈性、效能和穩定度 - 譯註:原文這裡是用「和」,但要視實際使用方式而定。 - 例如用 RAID-0 就不會增加穩定度 :)。 - &man.vinum.8; 實作了 RAID-0、RAID-1 和 RAID-5 等模組, - 它們都可以單獨使用,也可以互相搭配使用。 - - 請見 以參考更多關於 - &man.vinum.8; 的資訊。 - - - - - 硬體 RAID - - - RAID - hardware - - - FreeBSD 也支援許多硬體 RAID 控制器。 - 這些控制器自行掌控一個小型的 RAID 系統, - 因此不需要特定軟體來管理。 - - 透過控制器上的 BIOS 幾乎能控制所有的操作。 - 接下來將簡單介紹如何設定 Promise IDE - RAID 控制卡。首先確認控制卡已安裝,接著開機。 - 它應該會提示一些資訊譯註:例如按 F1 可以進入控制卡 - BIOS 之類的資訊。。依指示進入控制卡的設定畫面, - 從這裡您可以將全部的硬體結合成一個大磁碟。完成之後,FreeBSD - 將只會看到這個大磁碟。當然您也可以使用其它的 - RAID 模式。 - + + + Resizing and Growing Disks - - 重建(rebuild) ATA RAID1 陣列 - - FreeBSD 允許您熱插拔磁碟陣列裡壞掉的磁碟, - 當然在重開機前就得先發現。 - - 也許您會在 /var/log/messages(或 &man.dmesg.8; - 的輸出) 看到類似下面的訊息: - - ad6 on monster1 suffered a hard error. -ad6: READ command timeout tag=0 serv=0 - resetting -ad6: trying fallback to PIO mode -ata3: resetting devices .. done -ad6: hard error reading fsbn 1116119 of 0-7 (ad6 bn 1116119; cn 1107 tn 4 sn 11)\\ -status=59 error=40 -ar0: WARNING - mirror lost - - 請用 &man.atacontrol.8; 來得到更多資訊: - - &prompt.root; atacontrol list -ATA channel 0: - Master: no device present - Slave: acd0 <HL-DT-ST CD-ROM GCR-8520B/1.00> ATA/ATAPI rev 0 - -ATA channel 1: - Master: no device present - Slave: no device present - -ATA channel 2: - Master: ad4 <MAXTOR 6L080J4/A93.0500> ATA/ATAPI rev 5 - Slave: no device present - -ATA channel 3: - Master: ad6 <MAXTOR 6L080J4/A93.0500> ATA/ATAPI rev 5 - Slave: no device present - -&prompt.root; atacontrol status ar0 -ar0: ATA RAID1 subdisks: ad4 ad6 status: DEGRADED - - - - 首先您得將損壞磁碟所在的 ata channel 卸載(detach), - 如此才能安全地移除: + + + + Allan + Jude + + Originally contributed by + + + - &prompt.root; atacontrol detach ata3 - + + disks + resizing + - - 用好的磁碟換下損壞的。 - + A disk's capacity can increase without any changes to the + data already present. This happens commonly with virtual + machines, when the virtual disk turns out to be too small and is + enlarged. Sometimes a disk image is written to a + USB memory stick, but does not use the full + capacity. Here we describe how to resize or + grow disk contents to take advantage of + increased capacity. + + Determine the device name of the disk to be resized by + inspecting /var/run/dmesg.boot. In this + example, there is only one SATA disk in the + system, so the drive will appear as + ada0. 
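For instance, the relevant probe messages can be pulled out of that file with a simple search; the pattern below merely matches ada device names:

&prompt.root; grep ^ada /var/run/dmesg.boot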
- - 重新載入(re-attach) ata channel: + partitions + + gpart + - &prompt.root; atacontrol attach ata3 -Master: ad6 <MAXTOR 6L080J4/A93.0500> ATA/ATAPI rev 5 -Slave: no device present - + List the partitions on the disk to see the current + configuration: - - 將新的磁碟加入原本的磁碟陣列成為備援(spare) 磁碟: + &prompt.root; gpart show ada0 +=> 34 83886013 ada0 GPT (48G) [CORRUPT] + 34 128 1 freebsd-boot (64k) + 162 79691648 2 freebsd-ufs (38G) + 79691810 4194236 3 freebsd-swap (2G) + 83886046 1 - free - (512B) - &prompt.root; atacontrol addspare ar0 ad6 - + + If the disk was formatted with the + GPT partitioning scheme, it may show + as corrupted because the GPT + backup partition table is no longer at the end of the + drive. Fix the backup + partition table with + gpart: - - 重建磁碟陣列: + &prompt.root; gpart recover ada0 +ada0 recovered + - &prompt.root; atacontrol rebuild ar0 - + Now the additional space on the disk is available for + use by a new partition, or an existing partition can be + expanded: + + &prompt.root; gpart show ada0 +=> 34 102399933 ada0 GPT (48G) + 34 128 1 freebsd-boot (64k) + 162 79691648 2 freebsd-ufs (38G) + 79691810 4194236 3 freebsd-swap (2G) + 83886046 18513921 - free - (8.8G) + + Partitions can only be resized into contiguous free space. + Here, the last partition on the disk is the swap partition, but + the second partition is the one that needs to be resized. Swap + partitions only contain temporary data, so it can safely be + unmounted, deleted, and then recreated after resizing other + partitions. + + &prompt.root; swapoff /dev/ada0p3 +&prompt.root; gpart delete -i 3 ada0 +ada0p3 deleted +&prompt.root; gpart show ada0 +=> 34 102399933 ada0 GPT (48G) + 34 128 1 freebsd-boot (64k) + 162 79691648 2 freebsd-ufs (38G) + 79691810 22708157 - free - (10G) + + + There is risk of data loss when modifying the partition + table of a mounted file system. It is best to perform the + following steps on an unmounted file system while running from + a live CD-ROM or USB + device. However, if absolutely necessary, a mounted file + system can be resized after disabling GEOM safety + features: + + &prompt.root; sysctl kern.geom.debugflags=16 + + + Resize the partition, leaving room to recreate a swap + partition of the desired size. This only modifies the size of + the partition. The file system in the partition will be + expanded in a separate step. + + &prompt.root; gpart resize -i 2 -a 4k -s 47G ada0 +ada0p2 resized +&prompt.root; gpart show ada0 +=> 34 102399933 ada0 GPT (48G) + 34 128 1 freebsd-boot (64k) + 162 98566144 2 freebsd-ufs (47G) + 98566306 3833661 - free - (1.8G) + + Recreate the swap partition: + + &prompt.root; gpart add -t freebsd-swap -a 4k ada0 +ada0p3 added +&prompt.root; gpart show ada0 +=> 34 102399933 ada0 GPT (48G) + 34 128 1 freebsd-boot (64k) + 162 98566144 2 freebsd-ufs (47G) + 98566306 3833661 3 freebsd-swap (1.8G) +&prompt.root; swapon /dev/ada0p3 - - 可以用下面指定來確認重建的進度: + Grow the UFS file system to use the new + capacity of the resized partition: - &prompt.root; dmesg | tail -10 -[output removed] -ad6: removed from configuration -ad6: deleted from ar0 disk1 -ad6: inserted into ar0 disk1 as spare + + Growing a live UFS file system is only + possible in &os; 10.0-RELEASE and later. For earlier + versions, the file system must not be mounted. + -&prompt.root; atacontrol status ar0 -ar0: ATA RAID1 subdisks: ad4 ad6 status: REBUILDING 0% completed - + &prompt.root; growfs /dev/ada0p2 +Device is mounted read-write; resizing will result in temporary write suspension for /. 
+It's strongly recommended to make a backup before growing the file system. +OK to grow file system on /dev/ada0p2, mounted on /, from 38GB to 47GB? [Yes/No] Yes +super-block backups (for fsck -b #) at: + 80781312, 82063552, 83345792, 84628032, 85910272, 87192512, 88474752, + 89756992, 91039232, 92321472, 93603712, 94885952, 96168192, 97450432 - - 等重建完就完成了。 - - - + Both the partition and the file system on it have now been + resized to use the newly-available disk space. @@ -777,223 +605,176 @@ - Creating and Using Optical Media (CDs) + + Creating and Using <acronym>CD</acronym> Media + - MikeMeyerContributed by + + + Mike + Meyer + + Contributed by + - - - CDROMs + CD-ROMs creating - - Introduction + Compact Disc (CD) media provide a number + of features that differentiate them from conventional disks. + They are designed so that they can be read continuously without + delays to move the head between tracks. While + CD media do have tracks, these refer to a + section of data to be read continuously, and not a physical + property of the disk. The ISO 9660 file + system was designed to deal with these differences. + + ISO + 9660 + + file systems + ISO 9660 + + + + CD burner + ATAPI + - CDs have a number of features that differentiate them from - conventional disks. Initially, they were not writable by the - user. They are designed so that they can be read continuously without - delays to move the head between tracks. They are also much easier - to transport between systems than similarly sized media were at the - time. - - CDs do have tracks, but this refers to a section of data to - be read continuously and not a physical property of the disk. To - produce a CD on FreeBSD, you prepare the data files that are going - to make up the tracks on the CD, then write the tracks to the - CD. - - ISO 9660 - - file systems - ISO 9660 - - The ISO 9660 file system was designed to deal with these - differences. It unfortunately codifies file system limits that were - common then. Fortunately, it provides an extension mechanism that - allows properly written CDs to exceed those limits while still - working with systems that do not support those extensions. - - - sysutils/cdrtools - - The sysutils/cdrtools - port includes &man.mkisofs.8;, a program that you can use to - produce a data file containing an ISO 9660 file - system. It has options that support various extensions, and is - described below. - - - CD burner - ATAPI - - Which tool to use to burn the CD depends on whether your CD burner - is ATAPI or something else. ATAPI CD burners use the burncd program that is part of - the base system. SCSI and USB CD burners should use - cdrecord from - the sysutils/cdrtools port. - - burncd has a limited number of - supported drives. To find out if a drive is supported, see the - CD-R/RW supported - drives list. + The &os; Ports Collection provides several utilities for + burning and duplicating audio and data CDs. + This chapter demonstrates the use of several command line + utilities. For CD burning software with a + graphical utility, consider installing the + sysutils/xcdroast or + sysutils/k3b packages or ports. + + + + Supported Devices + + + + + Marc + Fonvieille + + Contributed by + + + - - CD burner + CD burner ATAPI/CAM driver - If you run &os; 5.X, &os; 4.8-RELEASE version or - higher, it will be possible to use cdrecord and other tools - for SCSI drives on an ATAPI hardware with the ATAPI/CAM module. 
- - - If you want a CD burning software with a graphical user - interface, you should have a look to - X-CD-Roast or - K3b. These tools are available as - packages or from the sysutils/xcdroast and sysutils/k3b ports. - X-CD-Roast and - K3b require the ATAPI/CAM module with ATAPI - hardware. - - - mkisofs + The GENERIC kernel provides support + for SCSI, USB, and + ATAPI CD readers and + burners. If a custom kernel is used, the options that need to + be present in the kernel configuration file vary by the type + of device. + + For a SCSI burner, make sure these + options are present: + + device scbus # SCSI bus (required for ATA/SCSI) +device da # Direct Access (disks) +device pass # Passthrough device (direct ATA/SCSI access) +device cd # needed for CD and DVD burners + + For a USB burner, make sure these + options are present: + + device scbus # SCSI bus (required for ATA/SCSI) +device da # Direct Access (disks) +device pass # Passthrough device (direct ATA/SCSI access) +device cd # needed for CD and DVD burners +device uhci # provides USB 1.x support +device ohci # provides USB 1.x support +device ehci # provides USB 2.0 support +device xhci # provides USB 3.0 support +device usb # USB Bus (required) +device umass # Disks/Mass storage - Requires scbus and da + + For an ATAPI burner, make sure these + options are present: + + device ata # Legacy ATA/SATA controllers +device scbus # SCSI bus (required for ATA/SCSI) +device pass # Passthrough device (direct ATA/SCSI access) +device cd # needed for CD and DVD burners - The &man.mkisofs.8; program, which is part of the - sysutils/cdrtools port, - produces an ISO 9660 file system - that is an image of a directory tree in the &unix; file system name - space. The simplest usage is: - - &prompt.root; mkisofs -o imagefile.iso /path/to/tree - - - file systems - ISO 9660 - - This command will create an imagefile.iso - containing an ISO 9660 file system that is a copy of the tree at - /path/to/tree. In the process, it will - map the file names to names that fit the limitations of the - standard ISO 9660 file system, and will exclude files that have - names uncharacteristic of ISO file systems. - - - file systems - HFS - - - file systems - Joliet - - A number of options are available to overcome those - restrictions. In particular, enables the - Rock Ridge extensions common to &unix; systems, - enables Joliet extensions used by Microsoft systems, and - can be used to create HFS file systems used - by &macos;. - - For CDs that are going to be used only on FreeBSD systems, - can be used to disable all filename - restrictions. When used with , it produces a - file system image that is identical to the FreeBSD tree you started - from, though it may violate the ISO 9660 standard in a number of - ways. - - - CDROMs - creating bootable - - The last option of general use is . This is - used to specify the location of the boot image for use in producing an - El Torito bootable CD. This option takes an - argument which is the path to a boot image from the top of the - tree being written to the CD. By default, &man.mkisofs.8; creates an - ISO image in the so-called floppy disk emulation mode, - and thus expects the boot image to be exactly 1200, 1440 or - 2880 KB in size. Some boot loaders, like the one used by the - FreeBSD distribution disks, do not use emulation mode; in this case, - the option should be used. 
So, if - /tmp/myboot holds a bootable FreeBSD system - with the boot image in - /tmp/myboot/boot/cdboot, you could produce the - image of an ISO 9660 file system in - /tmp/bootable.iso like so: + + On &os; versions prior to 10.x, this line is also + needed in the kernel configuration file if the burner is an + ATAPI device: - &prompt.root; mkisofs -R -no-emul-boot -b boot/cdboot -o /tmp/bootable.iso /tmp/myboot + device atapicam - Having done that, if you have md - configured in your kernel, you can mount the file system with: + Alternately, this driver can be loaded at boot time by + adding the following line to + /boot/loader.conf: - &prompt.root; mdconfig -a -t vnode -f /tmp/bootable.iso -u 0 -&prompt.root; mount -t cd9660 /dev/md0 /mnt + atapicam_load="YES" - At which point you can verify that /mnt - and /tmp/myboot are identical. + This will require a reboot of the system as this driver + can only be loaded at boot time. + - There are many other options you can use with - &man.mkisofs.8; to fine-tune its behavior. In particular: - modifications to an ISO 9660 layout and the creation of Joliet - and HFS discs. See the &man.mkisofs.8; manual page for details. - - - - burncd - - CDROMs - burning - - If you have an ATAPI CD burner, you can use the - burncd command to burn an ISO image onto a - CD. burncd is part of the base system, installed - as /usr/sbin/burncd. Usage is very simple, as - it has few options: - - &prompt.root; burncd -f cddevice data imagefile.iso fixate - - Will burn a copy of imagefile.iso on - cddevice. The default device is - /dev/acd0. See &man.burncd.8; for options to - set the write speed, eject the CD after burning, and write audio - data. + To verify that &os; recognizes the device, run + dmesg and look for an entry for the device. + On systems prior to 10.x, the device name in the first line of + the output will be acd0 instead of + cd0. + + &prompt.user; dmesg | grep cd +cd0 at ahcich1 bus 0 scbus1 target 0 lun 0 +cd0: <HL-DT-ST DVDRAM GU70N LT20> Removable CD-ROM SCSI-0 device +cd0: Serial Number M3OD3S34152 +cd0: 150.000MB/s transfers (SATA 1.x, UDMA6, ATAPI 12bytes, PIO 8192bytes) +cd0: Attempt to query device size failed: NOT READY, Medium not present - tray closed - cdrecord + Burning a <acronym>CD</acronym> + + In &os;, cdrecord can be used to burn + CDs. This command is installed with the + sysutils/cdrtools package or port. + + + &os; 8.x includes the built-in + burncd utility for burning + CDs using an ATAPI + CD burner. Refer to the manual page for + burncd for usage examples. + + + While cdrecord has many options, basic + usage is simple. Specify the name of the + ISO file to burn and, if the system has + multiple burner devices, specify the name of the device to + use: + + &prompt.root; cdrecord dev=device imagefile.iso + + To determine the device name of the burner, use + which might produce results like + this: - If you do not have an ATAPI CD burner, you will have to use - cdrecord to burn your - CDs. cdrecord is not part of the base system; - you must install it from either the port at sysutils/cdrtools - or the appropriate - package. Changes to the base system can cause binary versions of - this program to fail, possibly resulting in a - coaster. You should therefore either upgrade the - port when you upgrade your system, or if you are tracking -STABLE, upgrade the port when a - new version becomes available. - - While cdrecord has many options, basic usage - is even simpler than burncd. 
Burning an ISO 9660 - image is done with: - - &prompt.root; cdrecord dev=device imagefile.iso - - The tricky part of using cdrecord is finding - the to use. To find the proper setting, use - the flag of cdrecord, - which might produce results like this: - CDROMs - burning + CD-ROMs + burning &prompt.root; cdrecord -scanbus -Cdrecord-Clone 2.01 (i386-unknown-freebsd7.0) Copyright (C) 1995-2004 Jörg Schilling -Using libscg version 'schily-0.1' +ProDVD-ProBD-Clone 3.00 (amd64-unknown-freebsd10.0) Copyright (C) 1995-2010 Jörg Schilling +Using libscg version 'schily-0.9' scsibus0: 0,0,0 0) 'SEAGATE ' 'ST39236LW ' '0004' Disk 0,1,0 1) 'SEAGATE ' 'ST39173W ' '5958' Disk @@ -1013,590 +794,648 @@ 1,6,0 106) 'ARTEC ' 'AM12S ' '1.06' Scanner 1,7,0 107) * - This lists the appropriate value for the - devices on the list. Locate your CD burner, and use the three - numbers separated by commas as the value for - . In this case, the CRW device is 1,5,0, so the - appropriate input would be - . There are easier - ways to specify this value; see &man.cdrecord.1; for - details. That is also the place to look for information on writing - audio tracks, controlling the speed, and other things. - - - - Duplicating Audio CDs - - You can duplicate an audio CD by extracting the audio data from - the CD to a series of files, and then writing these files to a blank - CD. The process is slightly different for ATAPI and SCSI - drives. - - - SCSI Drives - - - Use cdda2wav to extract the audio. - - &prompt.user; cdda2wav -v255 -D2,0 -B -Owav - - - - Use cdrecord to write the - .wav files. - - &prompt.user; cdrecord -v dev=2,0 -dao -useinfo *.wav - - Make sure that 2,0 is set - appropriately, as described in . - - - - - ATAPI Drives + Locate the entry for the CD burner and + use the three numbers separated by commas as the value for + . In this case, the Yamaha burner device + is 1,5,0, so the appropriate input to + specify that device is . Refer to + the manual page for cdrecord for other ways + to specify this value and for information on writing audio + tracks and controlling the write speed. - - The ATAPI CD driver makes each track available as - /dev/acddtnn, - where d is the drive number, and - nn is the track number written with two - decimal digits, prefixed with zero as needed. - So the first track on the first disk is - /dev/acd0t01, the second is - /dev/acd0t02, the third is - /dev/acd0t03, and so on. - - Make sure the appropriate files exist in - /dev. If the entries are missing, - force the system to retaste the media: + Alternately, run the following command to get the device + address of the burner: - &prompt.root; dd if=/dev/acd0 of=/dev/null count=1 + &prompt.root; camcontrol devlist +<MATSHITA CDRW/DVD UJDA740 1.00> at scbus1 target 0 lun 0 (cd0,pass0) - + Use the numeric values for scbus, + target, and lun. For + this example, 1,0,0 is the device name to + use. + - - Extract each track using &man.dd.1;. You must also use a - specific block size when extracting the files. + + Writing Data to an <acronym>ISO</acronym> File + System - &prompt.root; dd if=/dev/acd0t01 of=track1.cdr bs=2352 -&prompt.root; dd if=/dev/acd0t02 of=track2.cdr bs=2352 -... - - + In order to produce a data CD, the data + files that are going to make up the tracks on the + CD must be prepared before they can be + burned to the CD. In &os;, + sysutils/cdrtools installs + mkisofs, which can be used to produce an + ISO 9660 file system that is an image of a + directory tree within a &unix; file system. 
The simplest + usage is to specify the name of the ISO + file to create and the path to the files to place into the + ISO 9660 file system: + + &prompt.root; mkisofs -o imagefile.iso /path/to/tree + + + file systems + ISO 9660 + + + This command maps the file names in the specified path to + names that fit the limitations of the standard + ISO 9660 file system, and will exclude + files that do not meet the standard for ISO + file systems. + + + file systems + Joliet + + + A number of options are available to overcome the + restrictions imposed by the standard. In particular, + enables the Rock Ridge extensions common + to &unix; systems and enables Joliet + extensions used by µsoft; systems. + + For CDs that are going to be used only + on &os; systems, can be used to disable + all filename restrictions. When used with + , it produces a file system image that is + identical to the specified &os; tree, even if it violates the + ISO 9660 standard. + + + CD-ROMs + creating bootable + + + The last option of general use is . + This is used to specify the location of a boot image for use + in producing an El Torito bootable + CD. This option takes an argument which is + the path to a boot image from the top of the tree being + written to the CD. By default, + mkisofs creates an ISO + image in floppy disk emulation mode, and thus + expects the boot image to be exactly 1200, 1440 or + 2880 KB in size. Some boot loaders, like the one used by + the &os; distribution media, do not use emulation mode. In + this case, should be used. So, + if /tmp/myboot holds a bootable &os; + system with the boot image in + /tmp/myboot/boot/cdboot, this command + would produce + /tmp/bootable.iso: - - Burn the extracted files to disk using - burncd. You must specify that these are audio - files, and that burncd should fixate the disk - when finished. + &prompt.root; mkisofs -R -no-emul-boot -b boot/cdboot -o /tmp/bootable.iso /tmp/myboot - &prompt.root; burncd -f /dev/acd0 audio track1.cdr track2.cdr ... fixate - - - + The resulting ISO image can be mounted + as a memory disk with: - - Duplicating Data CDs + &prompt.root; mdconfig -a -t vnode -f /tmp/bootable.iso -u 0 +&prompt.root; mount -t cd9660 /dev/md0 /mnt - You can copy a data CD to a image file that is - functionally equivalent to the image file created with - &man.mkisofs.8;, and you can use it to duplicate - any data CD. The example given here assumes that your CDROM - device is acd0. Substitute your - correct CDROM device. + One can then verify that /mnt and + /tmp/myboot are identical. - &prompt.root; dd if=/dev/acd0 of=file.iso bs=2048 + There are many other options available for + mkisofs to fine-tune its behavior. Refer + to &man.mkisofs.8; for details. - Now that you have an image, you can burn it to CD as - described above. + + It is possible to copy a data CD to + an image file that is functionally equivalent to the image + file created with mkisofs. To do so, use + dd with the device name as the input + file and the name of the ISO to create as + the output file: + + &prompt.root; dd if=/dev/cd0 of=file.iso bs=2048 + + The resulting image file can be burned to + CD as described in . + - Using Data CDs + Using Data <acronym>CD</acronym>s - Now that you have created a standard data CDROM, you - probably want to mount it and read the data on it. By - default, &man.mount.8; assumes that a file system is of type - ufs. 
If you try something like: - - &prompt.root; mount /dev/cd0 /mnt - - you will get a complaint about Incorrect super - block, and no mount. The CDROM is not a - UFS file system, so attempts to mount it - as such will fail. You just need to tell &man.mount.8; that - the file system is of type ISO9660, and - everything will work. You do this by specifying the - option &man.mount.8;. For - example, if you want to mount the CDROM device, - /dev/cd0, under - /mnt, you would execute: - - &prompt.root; mount -t cd9660 /dev/cd0 /mnt - - Note that your device name - (/dev/cd0 in this example) could be - different, depending on the interface your CDROM uses. Also, - the option just executes - &man.mount.cd9660.8;. The above example could be shortened - to: - -&prompt.root; mount_cd9660 /dev/cd0 /mnt - - You can generally use data CDROMs from any vendor in this - way. Disks with certain ISO 9660 extensions might behave - oddly, however. For example, Joliet disks store all filenames - in two-byte Unicode characters. The FreeBSD kernel does not - speak Unicode (yet!), so non-English characters show up as - question marks. (The FreeBSD - CD9660 driver includes hooks to load an appropriate Unicode - conversion table on the fly. Modules for some of the common - encodings are available via the - sysutils/cd9660_unicode port.) - - Occasionally, you might get Device not - configured when trying to mount a CDROM. This - usually means that the CDROM drive thinks that there is no - disk in the tray, or that the drive is not visible on the bus. - It can take a couple of seconds for a CDROM drive to realize - that it has been fed, so be patient. - - Sometimes, a SCSI CDROM may be missed because it did not - have enough time to answer the bus reset. If you have a SCSI - CDROM please add the following option to your kernel - configuration and rebuild your kernel. - - options SCSI_DELAY=15000 + Once an ISO has been burned to a + CD, it can be mounted by specifying the + file system type, the name of the device containing the + CD, and an existing mount point: + + &prompt.root; mount -t cd9660 /dev/cd0 /mnt + + Since mount assumes that a file system + is of type ufs, a Incorrect + super block error will occur if -t + cd9660 is not included when mounting a data + CD. + + While any data CD can be mounted this + way, disks with certain ISO 9660 extensions + might behave oddly. For example, Joliet disks store all + filenames in two-byte Unicode characters. If some non-English + characters show up as question marks, specify the local + charset with . For more information, refer + to &man.mount.cd9660.8;. - This tells your SCSI bus to pause 15 seconds during boot, - to give your CDROM drive every possible chance to answer the - bus reset. - + + In order to do this character conversion with the help + of , the kernel requires the + cd9660_iconv.ko module to be loaded. + This can be done either by adding this line to + loader.conf: - - Burning Raw Data CDs + cd9660_iconv_load="YES" - You can choose to burn a file directly to CD, without - creating an ISO 9660 file system. Some people do this for - backup purposes. This runs more quickly than burning a - standard CD: + and then rebooting the machine, or by directly loading + the module with kldload. + - &prompt.root; burncd -f /dev/acd1 -s 12 data archive.tar.gz fixate + Occasionally, Device not configured + will be displayed when trying to mount a data + CD. 
This usually means that the + CD drive thinks that there is no disk in + the tray, or that the drive is not visible on the bus. It + can take a couple of seconds for a CD + drive to realize that a media is present, so be + patient. + + Sometimes, a SCSI + CD drive may be missed because it did not + have enough time to answer the bus reset. To resolve this, + a custom kernel can be created which increases the default + SCSI delay. Add the following option to + the custom kernel configuration file and rebuild the kernel + using the instructions in : - In order to retrieve the data burned to such a CD, you - must read data from the raw device node: + options SCSI_DELAY=15000 - &prompt.root; tar xzvf /dev/acd1 + This tells the SCSI bus to pause 15 + seconds during boot, to give the CD + drive every possible chance to answer the bus reset. - You cannot mount this disk as you would a normal CDROM. - Such a CDROM cannot be read under any operating system - except FreeBSD. If you want to be able to mount the CD, or - share data with another operating system, you must use - &man.mkisofs.8; as described above. + + It is possible to burn a file directly to + CD, without creating an + ISO 9660 file system. This is known as + burning a raw data CD and some people do + this for backup purposes. + + This type of disk can not be mounted as a normal data + CD. In order to retrieve the data burned + to such a CD, the data must be read from + the raw device node. For example, this command will extract + a compressed tar file located on the second + CD device into the current working + directory: + + &prompt.root; tar xzvf /dev/cd1 + + In order to mount a data CD, the + data must be written using + mkisofs. + - - Using the ATAPI/CAM Driver - - MarcFonvieilleContributed by - - - - - - - CD burner - ATAPI/CAM driver - - - This driver allows ATAPI devices (CD-ROM, CD-RW, DVD - drives etc...) to be accessed through the SCSI subsystem, and - so allows the use of applications like sysutils/cdrdao or - &man.cdrecord.1;. - - To use this driver, you will need to add the following - line to your kernel configuration file: - - device atapicam - - You also need the following lines in your kernel - configuration file: - - device ata -device scbus -device cd -device pass - - which should already be present. - - Then rebuild, install your new kernel, and reboot your - machine. During the boot process, your burner should show up, - like so: - - acd0: CD-RW <MATSHITA CD-RW/DVD-ROM UJDA740> at ata1-master PIO4 -cd0 at ata1 bus 0 target 0 lun 0 -cd0: <MATSHITA CDRW/DVD UJDA740 1.00> Removable CD-ROM SCSI-0 device -cd0: 16.000MB/s transfers -cd0: Attempt to query device size failed: NOT READY, Medium not present - tray closed - - The drive could now be accessed via the - /dev/cd0 device name, for example to - mount a CD-ROM on /mnt, just type the - following: - - &prompt.root; mount -t cd9660 /dev/cd0 /mnt + + Duplicating Audio <acronym>CD</acronym>s - As root, you can run the following - command to get the SCSI address of the burner: + To duplicate an audio CD, extract the + audio data from the CD to a series of + files, then write these files to a blank + CD. + + describes how to + duplicate and burn an audio CD. If the + &os; version is less than 10.0 and the device is + ATAPI, the module + must be first loaded using the instructions in . + + + Duplicating an Audio <acronym>CD</acronym> + + + The sysutils/cdrtools package or + port installs cdda2wav. 
This command + can be used to extract all of the audio tracks, with each + track written to a separate WAV file in + the current working directory: + + &prompt.user; cdda2wav -vall -B -Owav + + A device name does not need to be specified if there + is only one CD device on the system. + Refer to the cdda2wav manual page for + instructions on how to specify a device and to learn more + about the other options available for this command. + - &prompt.root; camcontrol devlist -<MATSHITA CDRW/DVD UJDA740 1.00> at scbus1 target 0 lun 0 (pass0,cd0) + + Use cdrecord to write the + .wav files: - So 1,0,0 will be the SCSI address to - use with &man.cdrecord.1; and other SCSI application. + &prompt.user; cdrecord -v dev=2,0 -dao -useinfo *.wav - For more information about ATAPI/CAM and SCSI system, - refer to the &man.atapicam.4; and &man.cam.4; manual - pages. + Make sure that 2,0 is set + appropriately, as described in . + + - Creating and Using Optical Media (DVDs) + + Creating and Using <acronym>DVD</acronym> Media + - MarcFonvieilleContributed by + + + Marc + Fonvieille + + Contributed by + - AndyPolyakovWith inputs from + + + Andy + Polyakov + + With inputs from + - - - DVD + DVD burning - - Introduction - - Compared to the CD, the DVD is the next generation of - optical media storage technology. The DVD can hold more data - than any CD and is nowadays the standard for video - publishing. + Compared to the CD, the + DVD is the next generation of optical media + storage technology. The DVD can hold more + data than any CD and is the standard for + video publishing. - Five physical recordable formats can be defined for what - we will call a recordable DVD: + Five physical recordable formats can be defined for a + recordable DVD: - - - DVD-R: This was the first DVD recordable format - available. The DVD-R standard is defined by the DVD Forum. - This format is write once. - + + + DVD-R: This was the first DVD + recordable format available. The DVD-R standard is defined + by the DVD + Forum. This format is write once. + - - DVD-RW: This is the rewriteable version of - the DVD-R standard. A DVD-RW can be rewritten about 1000 - times. - + + DVD-RW: This is the rewritable + version of the DVD-R standard. A + DVD-RW can be rewritten about 1000 + times. + - - DVD-RAM: This is also a rewriteable format - supported by the DVD Forum. A DVD-RAM can be seen as a - removable hard drive. However, this media is not - compatible with most DVD-ROM drives and DVD-Video players; - only a few DVD writers support the DVD-RAM format. - + + DVD-RAM: This is a rewritable format + which can be seen as a removable hard drive. However, this + media is not compatible with most + DVD-ROM drives and DVD-Video players as + only a few DVD writers support the + DVD-RAM format. Refer to for more information on + DVD-RAM use. + - - DVD+RW: This is a rewriteable format defined by - the DVD+RW - Alliance. A DVD+RW can be rewritten about 1000 - times. - + + DVD+RW: This is a rewritable format + defined by the DVD+RW + Alliance. A DVD+RW can be + rewritten about 1000 times. + - - DVD+R: This format is the write once variation - of the DVD+RW format. - - + + DVD+R: This format is the write once variation of the + DVD+RW format. + + - A single layer recordable DVD can hold up to - 4,700,000,000 bytes which is actually 4.38 GB or - 4485 MB (1 kilobyte is 1024 bytes). + A single layer recordable DVD can hold up + to 4,700,000,000 bytes which is actually 4.38 GB or + 4485 MB as 1 kilobyte is 1024 bytes. 
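      As a quick sanity check of that figure, the conversion can
      be reproduced with &man.bc.1; from the base system; this is
      purely illustrative arithmetic, not a required step:

      &prompt.user; echo "scale=3; 4700000000 / (1024 * 1024 * 1024)" | bc
4.377

      which rounds to the 4.38 GB quoted above.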
- - A distinction must be made between the physical media and - the application. For example, a DVD-Video is a specific - file layout that can be written on any recordable DVD - physical media: DVD-R, DVD+R, DVD-RW etc. Before choosing - the type of media, you must be sure that both the burner and the - DVD-Video player (a standalone player or a DVD-ROM drive on - a computer) are compatible with the media under consideration. - + + A distinction must be made between the physical media and + the application. For example, a DVD-Video is a specific file + layout that can be written on any recordable + DVD physical media such as DVD-R, DVD+R, or + DVD-RW. Before choosing the type of media, + ensure that both the burner and the DVD-Video player are + compatible with the media under consideration. + Configuration - The program &man.growisofs.1; will be used to perform DVD - recording. This command is part of the - dvd+rw-tools utilities (sysutils/dvd+rw-tools). The - dvd+rw-tools support all DVD media - types. - - These tools use the SCSI subsystem to access to the - devices, therefore the ATAPI/CAM - support must be added to your kernel. If your burner - uses the USB interface this addition is useless, and you should - read the for more details on USB - devices configuration. - - You also have to enable DMA access for ATAPI devices, this - can be done in adding the following line to the - /boot/loader.conf file: + To perform DVD recording, use + &man.growisofs.1;. This command is part of the + sysutils/dvd+rw-tools utilities which + support all DVD media types. + + These tools use the SCSI subsystem to + access the devices, therefore ATAPI/CAM support must be loaded + or statically compiled into the kernel. This support is not + needed if the burner uses the USB + interface. Refer to for more + details on USB device configuration. + + DMA access must also be enabled for + ATAPI devices, by adding the following line + to /boot/loader.conf: hw.ata.atapi_dma="1" - Before attempting to use the - dvd+rw-tools you should consult the - dvd+rw-tools' - hardware compatibility notes for any information - related to your DVD burner. + Before attempting to use + dvd+rw-tools, consult the Hardware + Compatibility Notes. - If you want a graphical user interface, you should have - a look to K3b (sysutils/k3b) which provides a - user friendly interface to &man.growisofs.1; and many others + For a graphical user interface, consider using + sysutils/k3b which provides a user + friendly interface to &man.growisofs.1; and many other burning tools. - Burning Data DVDs + Burning Data <acronym>DVD</acronym>s - The &man.growisofs.1; command is a frontend to mkisofs, it will invoke - &man.mkisofs.8; to create the file system layout and will - perform the write on the DVD. This means you do not need to - create an image of the data before the burning process. + Since &man.growisofs.1; is a front-end to mkisofs, it will invoke + &man.mkisofs.8; to create the file system layout and perform + the write on the DVD. This means that an + image of the data does not need to be created before the + burning process. 
- To burn onto a DVD+R or a DVD-R the data from the /path/to/data directory, use the - following command: + To burn to a DVD+R or a DVD-R the data in + /path/to/data, use the following + command: - &prompt.root; growisofs -dvd-compat -Z /dev/cd0 -J -R /path/to/data + &prompt.root; growisofs -dvd-compat -Z /dev/cd0 -J -R /path/to/data - The options are passed to - &man.mkisofs.8; for the file system creation (in this case: an - ISO 9660 file system with Joliet and Rock Ridge extensions), - consult the &man.mkisofs.8; manual page for more + In this example, is passed to + &man.mkisofs.8; to create an ISO 9660 file system with Joliet + and Rock Ridge extensions. Refer to &man.mkisofs.8; for more details. - The option is used for the initial - session recording in any case: multiple sessions or not. The - DVD device, /dev/cd0, must be - changed according to your configuration. The - parameter will close the disk, - the recording will be unappendable. In return this should provide better - media compatibility with DVD-ROM drives. - - It is also possible to burn a pre-mastered image, for - example to burn the image - imagefile.iso, we will run: + For the initial session recording, is + used for both single and multiple sessions. Replace + /dev/cd0, with the name of the + DVD device. Using + indicates that the disk will be + closed and that the recording will be unappendable. This + should also provide better media compatibility with + DVD-ROM drives. - &prompt.root; growisofs -dvd-compat -Z /dev/cd0=imagefile.iso + To burn a pre-mastered image, such as + imagefile.iso, use: + + &prompt.root; growisofs -dvd-compat -Z /dev/cd0=imagefile.iso The write speed should be detected and automatically set - according to the media and the drive being used. If you want - to force the write speed, use the - parameter. For more information, read the &man.growisofs.1; - manual page. + according to the media and the drive being used. To force the + write speed, use . Refer to + &man.growisofs.1; for example usage. + + + In order to support working files larger than 4.38GB, an + UDF/ISO-9660 hybrid file system must be created by passing + to &man.mkisofs.8; and + all related programs, such as &man.growisofs.1;. This is + required only when creating an ISO image file or when + writing files directly to a disk. Since a disk created this + way must be mounted as an UDF file system with + &man.mount.udf.8;, it will be usable only on an UDF aware + operating system. Otherwise it will look as if it contains + corrupted files. + + To create this type of ISO file: + + &prompt.user; mkisofs -R -J -udf -iso-level 3 -o imagefile.iso /path/to/data + + To burn files directly to a disk: + + &prompt.root; growisofs -dvd-compat -udf -iso-level 3 -Z /dev/cd0 -J -R /path/to/data + + When an ISO image already contains large files, no + additional options are required for &man.growisofs.1; to + burn that image on a disk. + + Be sure to use an up-to-date version of + sysutils/cdrtools, which contains + &man.mkisofs.8;, as an older version may not contain large + files support. If the latest version does not work, install + sysutils/cdrtools-devel and read its + &man.mkisofs.8;. + - Burning a DVD-Video + Burning a <acronym>DVD</acronym>-Video - DVD - DVD-Video + DVD + DVD-Video - A DVD-Video is a specific file layout based on ISO 9660 - and the micro-UDF (M-UDF) specifications. 
The DVD-Video also - presents a specific data structure hierarchy, it is the reason - why you need a particular program such as multimedia/dvdauthor to author the - DVD. + A DVD-Video is a specific file layout based on the ISO + 9660 and micro-UDF (M-UDF) specifications. Since DVD-Video + presents a specific data structure hierarchy, a particular + program such as multimedia/dvdauthor is + needed to author the DVD. - If you already have an image of the DVD-Video file system, - just burn it in the same way as for any image, see the - previous section for an example. If you have made the DVD - authoring and the result is in, for example, the directory - /path/to/video, the - following command should be used to burn the DVD-Video: + If an image of the DVD-Video file system already exists, + it can be burned in the same way as any other image. If + dvdauthor was used to make the + DVD and the result is in + /path/to/video, the following command + should be used to burn the DVD-Video: - &prompt.root; growisofs -Z /dev/cd0 -dvd-video /path/to/video + &prompt.root; growisofs -Z /dev/cd0 -dvd-video /path/to/video - The option will be passed down to - &man.mkisofs.8; and will instruct it to create a DVD-Video file system - layout. Beside this, the option - implies &man.growisofs.1; - option. + is passed to &man.mkisofs.8; + to instruct it to create a DVD-Video file system layout. + This option implies the + &man.growisofs.1; option. - Using a DVD+RW + Using a <acronym>DVD+RW</acronym> - DVD - DVD+RW + DVD + DVD+RW - Unlike CD-RW, a virgin DVD+RW needs to be formatted before - first use. The &man.growisofs.1; program will take care of it - automatically whenever appropriate, which is the - recommended way. However you can use the - dvd+rw-format command to format the - DVD+RW: + Unlike CD-RW, a virgin DVD+RW needs to + be formatted before first use. It is + recommended to let &man.growisofs.1; take + care of this automatically whenever appropriate. However, it + is possible to use dvd+rw-format to format + the DVD+RW: - &prompt.root; dvd+rw-format /dev/cd0 + &prompt.root; dvd+rw-format /dev/cd0 - You need to perform this operation just once, keep in mind - that only virgin DVD+RW medias need to be formatted. Then you - can burn the DVD+RW in the way seen in previous - sections. + Only perform this operation once and keep in mind that + only virgin DVD+RW medias need to be + formatted. Once formatted, the DVD+RW can + be burned as usual. - If you want to burn new data (burn a totally new file - system not append some data) onto a DVD+RW, you do not need to - blank it, you just have to write over the previous recording - (in performing a new initial session), like this: + To burn a totally new file system and not just append some + data onto a DVD+RW, the media does not need + to be blanked first. Instead, write over the previous + recording like this: - &prompt.root; growisofs -Z /dev/cd0 -J -R /path/to/newdata + &prompt.root; growisofs -Z /dev/cd0 -J -R /path/to/newdata - DVD+RW format offers the possibility to easily append data - to a previous recording. The operation consists in merging a - new session to the existing one, it is not multisession - writing, &man.growisofs.1; will grow the - ISO 9660 file system present on the media. + The DVD+RW format supports appending + data to a previous recording. This operation consists of + merging a new session to the existing one as it is not + considered to be multi-session writing. 
&man.growisofs.1; + will grow the ISO 9660 file system + present on the media. - For example, if we want to append data to our previous - DVD+RW, we have to use the following: + For example, to append data to a + DVD+RW, use the following: - &prompt.root; growisofs -M /dev/cd0 -J -R /path/to/nextdata + &prompt.root; growisofs -M /dev/cd0 -J -R /path/to/nextdata - The same &man.mkisofs.8; options we used to burn the + The same &man.mkisofs.8; options used to burn the initial session should be used during next writes. - You may want to use the - option if you want better media compatibility with DVD-ROM - drives. In the DVD+RW case, this will not prevent you from - adding data. + Use for better media + compatibility with DVD-ROM drives. When + using DVD+RW, this option will not + prevent the addition of data. - If for any reason you really want to blank the media, do - the following: + To blank the media, use: - &prompt.root; growisofs -Z /dev/cd0=/dev/zero + &prompt.root; growisofs -Z /dev/cd0=/dev/zero - Using a DVD-RW + Using a <acronym>DVD-RW</acronym> - DVD - DVD-RW + DVD + DVD-RW - A DVD-RW accepts two disc formats: the incremental - sequential one and the restricted overwrite. By default - DVD-RW discs are in sequential format. + A DVD-RW accepts two disc formats: + incremental sequential and restricted overwrite. By default, + DVD-RW discs are in sequential + format. - A virgin DVD-RW can be directly written without the need - of a formatting operation, however a non-virgin DVD-RW in - sequential format needs to be blanked before to be able to - write a new initial session. + A virgin DVD-RW can be directly written + without being formatted. However, a non-virgin + DVD-RW in sequential format needs to be + blanked before writing a new initial session. - To blank a DVD-RW in sequential mode, run: + To blank a DVD-RW in sequential + mode: - &prompt.root; dvd+rw-format -blank=full /dev/cd0 + &prompt.root; dvd+rw-format -blank=full /dev/cd0 - A full blanking () will take - about one hour on a 1x media. A fast blanking can be - performed using the option if the - DVD-RW will be recorded in Disk-At-Once (DAO) mode. To burn - the DVD-RW in DAO mode, use the command: - - &prompt.root; growisofs -use-the-force-luke=dao -Z /dev/cd0=imagefile.iso - - The option - should not be required since &man.growisofs.1; attempts to - detect minimally (fast blanked) media and engage DAO - write. - - In fact one should use restricted overwrite mode with - any DVD-RW, this format is more flexible than the default - incremental sequential one. + A full blanking using will + take about one hour on a 1x media. A fast blanking can be + performed using , if the + DVD-RW will be recorded in Disk-At-Once + (DAO) mode. To burn the DVD-RW in DAO + mode, use the command: + + &prompt.root; growisofs -use-the-force-luke=dao -Z /dev/cd0=imagefile.iso + + Since &man.growisofs.1; automatically attempts to detect + fast blanked media and engage DAO write, + should not be + required. + + One should instead use restricted overwrite mode with + any DVD-RW as this format is more + flexible than the default of incremental sequential. 
- To write data on a sequential DVD-RW, use the same - instructions as for the other DVD formats: + To write data on a sequential DVD-RW, + use the same instructions as for the other + DVD formats: - &prompt.root; growisofs -Z /dev/cd0 -J -R /path/to/data + &prompt.root; growisofs -Z /dev/cd0 -J -R /path/to/data - If you want to append some data to your previous - recording, you will have to use the &man.growisofs.1; - option. However, if you perform data - addition on a DVD-RW in incremental sequential mode, a new - session will be created on the disc and the result will be a - multi-session disc. + To append some data to a previous recording, use + with &man.growisofs.1;. However, if data + is appended on a DVD-RW in incremental + sequential mode, a new session will be created on the disc and + the result will be a multi-session disc. - A DVD-RW in restricted overwrite format does not need to - be blanked before a new initial session, you just have to - overwrite the disc with the option, this - is similar to the DVD+RW case. It is also possible to grow an - existing ISO 9660 file system written on the disc in a same - way as for a DVD+RW with the option. The - result will be a one-session DVD. + A DVD-RW in restricted overwrite format + does not need to be blanked before a new initial session. + Instead, overwrite the disc with . It is + also possible to grow an existing ISO 9660 file system written + on the disc with . The result will be a + one-session DVD. - To put a DVD-RW in the restricted overwrite format, the - following command must be used: + To put a DVD-RW in restricted overwrite + format, the following command must be used: - &prompt.root; dvd+rw-format /dev/cd0 + &prompt.root; dvd+rw-format /dev/cd0 - To change back to the sequential format use: + To change back to sequential format, use: - &prompt.root; dvd+rw-format -blank=full /dev/cd0 + &prompt.root; dvd+rw-format -blank=full /dev/cd0 - Multisession + Multi-Session - Very few DVD-ROM drives support - multisession DVDs, they will most of time, hopefully, only read - the first session. DVD+R, DVD-R and DVD-RW in sequential - format can accept multiple sessions, the notion of multiple - sessions does not exist for the DVD+RW and the DVD-RW + Few DVD-ROM drives support + multi-session DVDs and most of the time only read the first + session. DVD+R, DVD-R and DVD-RW in + sequential format can accept multiple sessions. The notion + of multiple sessions does not exist for the + DVD+RW and the DVD-RW restricted overwrite formats. - Using the following command after an initial (non-closed) - session on a DVD+R, DVD-R, or DVD-RW in sequential format, - will add a new session to the disc: - - &prompt.root; growisofs -M /dev/cd0 -J -R /path/to/nextdata - - Using this command line with a DVD+RW or a DVD-RW in restricted - overwrite mode, will append data in merging the new session to - the existing one. The result will be a single-session disc. - This is the way used to add data after an initial write on these - medias. + Using the following command after an initial non-closed + session on a DVD+R, DVD-R, or DVD-RW in + sequential format, will add a new session to the disc: + + &prompt.root; growisofs -M /dev/cd0 -J -R /path/to/nextdata + + Using this command with a DVD+RW or a + DVD-RW in restricted overwrite mode will + append data while merging the new session to the existing one. + The result will be a single-session disc. Use this method to + add data after an initial write on these types of + media. 
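      To confirm how the sessions and tracks ended up on the
      media after such a write, point
      dvd+rw-mediainfo, described at the end of
      this section, at the drive:

      &prompt.root; dvd+rw-mediainfo /dev/cd0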
- Some space on the media is used between each session for - end and start of sessions. Therefore, one should add - sessions with large amount of data to optimize media space. - The number of sessions is limited to 154 for a DVD+R, - about 2000 for a DVD-R, and 127 for a DVD+R Double + Since some space on the media is used between each + session to mark the end and start of sessions, one should + add sessions with a large amount of data to optimize media + space. The number of sessions is limited to 154 for a + DVD+R, about 2000 for a DVD-R, and 127 for a DVD+R Double Layer. @@ -1604,1378 +1443,798 @@ For More Information - To obtain more information about a DVD, the - dvd+rw-mediainfo - /dev/cd0 command can be - ran with the disc in the drive. + To obtain more information about a DVD, + use dvd+rw-mediainfo + /dev/cd0 while the + disc in the specified drive. - More information about the + More information about dvd+rw-tools can be found in - the &man.growisofs.1; manual page, on the dvd+rw-tools - web site and in the cdwrite mailing - list archives. + &man.growisofs.1;, on the dvd+rw-tools + web site, and in the cdwrite + mailing list archives. - The dvd+rw-mediainfo output of the - resulting recording or the media with issues is mandatory - for any problem report. Without this output, it will be - quite impossible to help you. + When creating a problem report related to the use of + dvd+rw-tools, always include the + output of dvd+rw-mediainfo. - - - Creating and Using Floppy Disks - - JulioMerinoOriginal work by - - - - MartinKarlssonRewritten by - - - - - - - Storing data on floppy disks is sometimes useful, for - example when one does not have any other removable storage media - or when one needs to transfer small amounts of data to another - computer. - - This section will explain how to use floppy disks in - FreeBSD. It will primarily cover formatting and usage of - 3.5inch DOS floppies, but the concepts are similar for other - floppy disk formats. - - - Formatting Floppies - - - The Device - - Floppy disks are accessed through entries in - /dev, just like other devices. To - access the raw floppy disk, simply use - /dev/fdN. - - - - - Formatting - - A floppy disk needs to be low-level formated before it - can be used. This is usually done by the vendor, but - formatting is a good way to check media integrity. Although - it is possible to force larger (or smaller) disk sizes, - 1440kB is what most floppy disks are designed for. - - To low-level format the floppy disk you need to use - &man.fdformat.1;. This utility expects the device name as an - argument. - - Make note of any error messages, as these can help - determine if the disk is good or bad. - - - Formatting Floppy Disks - - Use the - /dev/fdN - devices to format the floppy. Insert a new 3.5inch floppy - disk in your drive and issue: - - &prompt.root; /usr/sbin/fdformat -f 1440 /dev/fd0 - - - - - - - The Disk Label - - After low-level formatting the disk, you will need to - place a disk label on it. This disk label will be destroyed - later, but it is needed by the system to determine the size of - the disk and its geometry later. - - The new disk label will take over the whole disk, and will - contain all the proper information about the geometry of the - floppy. The geometry values for the disk label are listed in - /etc/disktab. 
- - You can run now &man.bsdlabel.8; like so: - - &prompt.root; /sbin/bsdlabel -B -r -w /dev/fd0 fd1440 - - Since &os; 5.1-RELEASE, the &man.bsdlabel.8; - utility replaces the old &man.bsdlabel.8; program. With - &man.bsdlabel.8; a number of obsolete options and parameters - have been retired; in the example above the option - should be removed. For more - information, please refer to the &man.bsdlabel.8; - manual page. - - - - - The File System - - Now the floppy is ready to be high-level formated. This - will place a new file system on it, which will let FreeBSD read - and write to the disk. After creating the new file system, the - disk label is destroyed, so if you want to reformat the disk, you - will have to recreate the disk label. - - The floppy's file system can be either UFS or FAT. - FAT is generally a better choice for floppies. - - To put a new file system on the floppy, issue: - - &prompt.root; /sbin/newfs_msdos /dev/fd0 - - The disk is now ready for use. - - - - - Using the Floppy - - To use the floppy, mount it with &man.mount.msdos.8;. One can also use - emulators/mtools from the ports - collection. - - - - - Creating and Using Data Tapes - - tape media - The major tape media are the 4mm, 8mm, QIC, mini-cartridge and - DLT. - - - 4mm (DDS: Digital Data Storage) - - - tape media - DDS (4mm) tapes - - - tape media - QIC tapes - - 4mm tapes are replacing QIC as the workstation backup media of - choice. This trend accelerated greatly when Conner purchased Archive, - a leading manufacturer of QIC drives, and then stopped production of - QIC drives. 4mm drives are small and quiet but do not have the - reputation for reliability that is enjoyed by 8mm drives. The - cartridges are less expensive and smaller (3 x 2 x 0.5 inches, 76 x 51 - x 12 mm) than 8mm cartridges. 4mm, like 8mm, has comparatively short - head life for the same reason, both use helical scan. - - Data throughput on these drives starts ~150 kB/s, peaking at ~500 kB/s. - Data capacity starts at 1.3 GB and ends at 2.0 GB. Hardware - compression, available with most of these drives, approximately - doubles the capacity. Multi-drive tape library units can have 6 - drives in a single cabinet with automatic tape changing. Library - capacities reach 240 GB. - - The DDS-3 standard now supports tape capacities up to 12 GB (or - 24 GB compressed). - - 4mm drives, like 8mm drives, use helical-scan. All the benefits - and drawbacks of helical-scan apply to both 4mm and 8mm drives. - - Tapes should be retired from use after 2,000 passes or 100 full - backups. - - - - 8mm (Exabyte) - - tape media - Exabyte (8mm) tapes - - - 8mm tapes are the most common SCSI tape drives; they are the best - choice of exchanging tapes. Nearly every site has an Exabyte 2 GB 8mm - tape drive. 8mm drives are reliable, convenient and quiet. Cartridges - are inexpensive and small (4.8 x 3.3 x 0.6 inches; 122 x 84 x 15 mm). - One downside of 8mm tape is relatively short head and tape life due to - the high rate of relative motion of the tape across the heads. - - Data throughput ranges from ~250 kB/s to ~500 kB/s. Data sizes start - at 300 MB and go up to 7 GB. Hardware compression, available with - most of these drives, approximately doubles the capacity. These - drives are available as single units or multi-drive tape libraries - with 6 drives and 120 tapes in a single cabinet. Tapes are changed - automatically by the unit. Library capacities reach 840+ GB. 
- - The Exabyte Mammoth model supports 12 GB on one tape - (24 GB with compression) and costs approximately twice as much as - conventional tape drives. - - Data is recorded onto the tape using helical-scan, the heads are - positioned at an angle to the media (approximately 6 degrees). The - tape wraps around 270 degrees of the spool that holds the heads. The - spool spins while the tape slides over the spool. The result is a - high density of data and closely packed tracks that angle across the - tape from one edge to the other. - + + Using a <acronym>DVD-RAM</acronym> - - QIC - tape media - QIC-150 + DVD + DVD-RAM - QIC-150 tapes and drives are, perhaps, the most common tape drive - and media around. QIC tape drives are the least expensive serious - backup drives. The downside is the cost of media. QIC tapes are - expensive compared to 8mm or 4mm tapes, up to 5 times the price per GB - data storage. But, if your needs can be satisfied with a half-dozen - tapes, QIC may be the correct choice. QIC is the - most common tape drive. Every site has a QIC - drive of some density or another. Therein lies the rub, QIC has a - large number of densities on physically similar (sometimes identical) - tapes. QIC drives are not quiet. These drives audibly seek before - they begin to record data and are clearly audible whenever reading, - writing or seeking. QIC tapes measure (6 x 4 x 0.7 inches; 152 x - 102 x 17 mm). - - Data throughput ranges from ~150 kB/s to ~500 kB/s. Data capacity - ranges from 40 MB to 15 GB. Hardware compression is available on many - of the newer QIC drives. QIC drives are less frequently installed; - they are being supplanted by DAT drives. - - Data is recorded onto the tape in tracks. The tracks run along - the long axis of the tape media from one end to the other. The number - of tracks, and therefore the width of a track, varies with the tape's - capacity. Most if not all newer drives provide backward-compatibility - at least for reading (but often also for writing). QIC has a good - reputation regarding the safety of the data (the mechanics are simpler - and more robust than for helical scan drives). - - Tapes should be retired from use after 5,000 backups. - + DVD-RAM writers can use either a + SCSI or ATAPI interface. + For ATAPI devices, DMA access has to be + enabled by adding the following line to + /boot/loader.conf: - - DLT - - tape media - DLT - - - DLT has the fastest data transfer rate of all the drive types - listed here. The 1/2" (12.5mm) tape is contained in a single spool - cartridge (4 x 4 x 1 inches; 100 x 100 x 25 mm). The cartridge has a - swinging gate along one entire side of the cartridge. The drive - mechanism opens this gate to extract the tape leader. The tape leader - has an oval hole in it which the drive uses to hook the tape. The - take-up spool is located inside the tape drive. All the other tape - cartridges listed here (9 track tapes are the only exception) have - both the supply and take-up spools located inside the tape cartridge - itself. - - Data throughput is approximately 1.5 MB/s, three times the throughput of - 4mm, 8mm, or QIC tape drives. Data capacities range from 10 GB to 20 GB - for a single drive. Drives are available in both multi-tape changers - and multi-tape, multi-drive tape libraries containing from 5 to 900 - tapes over 1 to 20 drives, providing from 50 GB to 9 TB of - storage. - - With compression, DLT Type IV format supports up to 70 GB - capacity. 
- - Data is recorded onto the tape in tracks parallel to the direction - of travel (just like QIC tapes). Two tracks are written at once. - Read/write head lifetimes are relatively long; once the tape stops - moving, there is no relative motion between the heads and the - tape. - - - - AIT - - tape media - AIT - - - AIT is a new format from Sony, and can hold up to 50 GB (with - compression) per tape. The tapes contain memory chips which retain an - index of the tape's contents. This index can be rapidly read by the - tape drive to determine the position of files on the tape, instead of - the several minutes that would be required for other tapes. Software - such as SAMS:Alexandria can operate forty or more AIT tape libraries, - communicating directly with the tape's memory chip to display the - contents on screen, determine what files were backed up to which - tape, locate the correct tape, load it, and restore the data from the - tape. - - Libraries like this cost in the region of $20,000, pricing them a - little out of the hobbyist market. - - - - Using a New Tape for the First Time - - The first time that you try to read or write a new, completely - blank tape, the operation will fail. The console messages should be - similar to: - - sa0(ncr1:4:0): NOT READY asc:4,1 -sa0(ncr1:4:0): Logical unit is in process of becoming ready - - The tape does not contain an Identifier Block (block number 0). - All QIC tape drives since the adoption of QIC-525 standard write an - Identifier Block to the tape. There are two solutions: - - - - mt fsf 1 causes the tape drive to write an - Identifier Block to the tape. - - - - Use the front panel button to eject the tape. - - Re-insert the tape and dump data to - the tape. + hw.ata.atapi_dma="1" - dump will report DUMP: End of tape - detected and the console will show: HARDWARE - FAILURE info:280 asc:80,96. + A DVD-RAM can be seen as a removable + hard drive. Like any other hard drive, the + DVD-RAM must be formatted before it can be + used. In this example, the whole disk space will be formatted + with a standard UFS2 file system: + + &prompt.root; dd if=/dev/zero of=/dev/acd0 bs=2k count=1 +&prompt.root; bsdlabel -Bw acd0 +&prompt.root; newfs /dev/acd0 + + The DVD device, + acd0, must be changed according to the + configuration. - rewind the tape using: mt rewind. + Once the DVD-RAM has been formatted, it + can be mounted as a normal hard drive: - Subsequent tape operations are successful. - - + &prompt.root; mount /dev/acd0 /mnt + Once mounted, the DVD-RAM will be both + readable and writeable. - - Backups to Floppies - - - Can I Use Floppies for Backing Up My Data? - backup floppies - floppy disks - - Floppy disks are not really a suitable media for - making backups as: - - - - The media is unreliable, especially over long periods of - time. - - - - Backing up and restoring is very slow. - - - - They have a very limited capacity (the days of backing up - an entire hard disk onto a dozen or so floppies has long since - passed). - - - - However, if you have no other method of backing up your data then - floppy disks are better than no backup at all. - - If you do have to use floppy disks then ensure that you use good - quality ones. Floppies that have been lying around the office for a - couple of years are a bad choice. Ideally use new ones from a - reputable manufacturer. - - - - So How Do I Backup My Data to Floppies? 
- - The best way to backup to floppy disk is to use - &man.tar.1; with the (multi - volume) option, which allows backups to span multiple - floppies. - - To backup all the files in the current directory and sub-directory - use this (as root): - - &prompt.root; tar Mcvf /dev/fd0 * - - When the first floppy is full &man.tar.1; will prompt you to - insert the next volume (because &man.tar.1; is media independent it - refers to volumes; in this context it means floppy disk). - - Prepare volume #2 for /dev/fd0 and hit return: - - This is repeated (with the volume number incrementing) until all - the specified files have been archived. - - - - Can I Compress My Backups? - - tar - - - gzip - - compression - - Unfortunately, &man.tar.1; will not allow the - option to be used for multi-volume archives. - You could, of course, &man.gzip.1; all the files, - &man.tar.1; them to the floppies, then - &man.gunzip.1; the files again! - + + Creating and Using Floppy Disks - - How Do I Restore My Backups? + - &prompt.root; tar Mxvf /dev/fd0 + This section explains how to format a 3.5 inch floppy disk + in &os;. - There are two ways that you can use to restore only - specific files. First, you can start with the first floppy - and use: + + Steps to Format a Floppy - &prompt.root; tar Mxvf /dev/fd0 filename + A floppy disk needs to be low-level formatted before it + can be used. This is usually done by the vendor, but + formatting is a good way to check media integrity. To + low-level format the floppy disk on &os;, use + &man.fdformat.1;. When using this utility, make note of any + error messages, as these can help determine if the disk is + good or bad. + + + To format the floppy, insert a new 3.5 inch floppy disk + into the first floppy drive and issue: - The utility &man.tar.1; will prompt you to insert subsequent floppies until it - finds the required file. + &prompt.root; /usr/sbin/fdformat -f 1440 /dev/fd0 + - Alternatively, if you know which floppy the file is on then you - can simply insert that floppy and use the same command as above. Note - that if the first file on the floppy is a continuation from the - previous one then &man.tar.1; will warn you that it cannot - restore it, even if you have not asked it to! - + + After low-level formatting the disk, create a disk label + as it is needed by the system to determine the size of the + disk and its geometry. The supported geometry values are + listed in /etc/disktab. + + To write the disk label, use &man.bsdlabel.8;: + + &prompt.root; /sbin/bsdlabel -B -w /dev/fd0 fd1440 + + + + The floppy is now ready to be high-level formatted with + a file system. The floppy's file system can be either UFS + or FAT, where FAT is generally a better choice for + floppies. + + To format the floppy with FAT, issue: + + &prompt.root; /sbin/newfs_msdos /dev/fd0 + + + + The disk is now ready for use. To use the floppy, mount it + with &man.mount.msdosfs.8;. One can also install and use + emulators/mtools from the Ports + Collection. - - Backup Strategies - - LowellGilbertOriginal work by - - - - - - - The first requirement in devising a backup plan is to make sure that - all of the following problems are covered: - - - - Disk failure - - - Accidental file deletion - - - Random file corruption - - - Complete machine destruction (e.g. fire), including destruction - of any on-site backups. - - + + Backup Basics - It is perfectly possible that some systems will be best served by - having each of these problems covered by a completely different - technique. 
Except for strictly personal systems with very low-value - data, it is unlikely that one technique would cover all of them. + - Some of the techniques in the toolbox are: + Implementing a backup plan is essential in order to have the + ability to recover from disk failure, accidental file deletion, + random file corruption, or complete machine destruction, + including destruction of on-site backups. + + The backup type and schedule will vary, depending upon the + importance of the data, the granularity needed for file + restores, and the amount of acceptable downtime. Some possible + backup techniques include: - Archives of the whole system, backed up onto permanent media - offsite. This actually provides protection against all of the - possible problems listed above, but is slow and inconvenient to - restore from. You can keep copies of the backups onsite and/or - online, but there will still be inconveniences in restoring files, - especially for non-privileged users. + Archives of the whole system, backed up onto permanent, + off-site media. This provides protection against all of the + problems listed above, but is slow and inconvenient to + restore from, especially for non-privileged users. - Filesystem snapshots. This is really only helpful in the - accidental file deletion scenario, but it can be - very helpful in that case, and is quick and - easy to deal with. + File system snapshots, which are useful for restoring + deleted files or previous versions of files. - Copies of whole filesystems and/or disks (e.g. periodic rsync of - the whole machine). This is generally most useful in networks with - unique requirements. For general protection against disk failure, - it is usually inferior to RAID. For restoring - accidentally deleted files, it can be comparable to - UFS snapshots, but that depends on your - preferences. + Copies of whole file systems or disks which are + sychronized with another system on the network using a + scheduled net/rsync. - RAID. Minimizes or avoids downtime when a - disk fails. At the expense of having to deal with disk failures - more often (because you have more disks), albeit at a much lower - urgency. - - - - Checking fingerprints of files. The &man.mtree.8; utility is - very useful for this. Although it is not a backup technique, it - helps guarantee that you will notice when you need to resort to your - backups. This is particularly important for offline backups, and - should be checked periodically. + Hardware or software RAID, which + minimizes or avoids downtime when a disk fails. - It is quite easy to come up with even more techniques, many of them - variations on the ones listed above. Specialized requirements will - usually lead to specialized techniques (for example, backing up a live - database usually requires a method particular to the database software - as an intermediate step). The important thing is to know what dangers - you want to protect against, and how you will handle each. - + Typically, a mix of backup techniques is used. For + example, one could create a schedule to automate a weekly, full + system backup that is stored off-site and to supplement this + backup with hourly ZFS snapshots. In addition, one could make a + manual backup of individual directories or files before making + file edits or deletions. - - Backup Basics - - The three major backup programs are - &man.dump.8;, - &man.tar.1;, - and - &man.cpio.1;. + This section describes some of the utilities which can be + used to create and manage backups on a &os; system. 
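+
+ As an illustration of the scheduled net/rsync
+ technique listed above, a nightly mirror of /usr/home
+ to another machine could be added to root's crontab. This
+ sketch assumes that net/rsync is installed on both
+ hosts, that SSH key authentication has been configured so the
+ transfer can run unattended, and that the host backuphost
+ and the backup user are placeholders to replace:
+
+0 2 * * *	rsync -az -e ssh /usr/home/ backup@backuphost:/backups/home/
+
+ Because rsync transfers only the differences between
+ the two copies, runs after the first one are usually quick.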
- Dump and Restore + File System Backups + - backup software + backup software dump / restore - dump - restore + + dump + + + restore + + + The traditional &unix; programs for backing up a file + system are &man.dump.8;, which creates the backup, and + &man.restore.8;, which restores the backup. These utilities + work at the disk block level, below the abstractions of the + files, links, and directories that are created by file + systems. Unlike other backup software, + dump backs up an entire file system and is + unable to backup only part of a file system or a directory + tree that spans multiple file systems. Instead of writing + files and directories, dump writes the raw + data blocks that comprise files and directories. - The traditional &unix; backup programs are - dump and restore. They - operate on the drive as a collection of disk blocks, below the - abstractions of files, links and directories that are created by - the file systems. dump backs up an entire - file system on a device. It is unable to backup only part of a - file system or a directory tree that spans more than one - file system. dump does not write files and - directories to tape, but rather writes the raw data blocks that - comprise files and directories. - - If you use dump on your root directory, you - would not back up /home, - /usr or many other directories since - these are typically mount points for other file systems or - symbolic links into those file systems. - - dump has quirks that remain from its early days in - Version 6 of AT&T UNIX (circa 1975). The default - parameters are suitable for 9-track tapes (6250 bpi), not the - high-density media available today (up to 62,182 ftpi). These - defaults must be overridden on the command line to utilize the - capacity of current tape drives. - - .rhosts - It is also possible to backup data across the network to a - tape drive attached to another computer with rdump and - rrestore. Both programs rely upon &man.rcmd.3; and - &man.ruserok.3; to access the remote tape drive. Therefore, - the user performing the backup must be listed in the - .rhosts file on the remote computer. The - arguments to rdump and rrestore must be suitable - to use on the remote computer. When - rdumping from a FreeBSD computer to an - Exabyte tape drive connected to a Sun called - komodo, use: - - &prompt.root; /sbin/rdump 0dsbfu 54000 13000 126 komodo:/dev/nsa8 /dev/da0a 2>&1 - - Beware: there are security implications to - allowing .rhosts authentication. Evaluate your - situation carefully. - - It is also possible to use dump and - restore in a more secure fashion over - ssh. + + If dump is used on the root + directory, it will not back up /home, + /usr or many other directories since + these are typically mount points for other file systems or + symbolic links into those file systems. + + + When used to restore data, restore + stores temporary files in /tmp/ by + default. When using a recovery disk with a small + /tmp, set TMPDIR to a + directory with more free space in order for the restore to + succeed. + + When using dump, be aware that some + quirks remain from its early days in Version 6 of + AT&T &unix;, circa 1975. The default parameters assume a + backup to a 9-track tape, rather than to another type of media + or to the high-density tapes available today. These defaults + must be overridden on the command line. + + + .rhosts + + It is possible to backup a file system across the network + to another system or to a tape drive attached to another + computer.
While the &man.rdump.8; and &man.rrestore.8; + utilities can be used for this purpose, they are not + considered to be secure. + + Instead, one can use dump and + restore in a more secure fashion over an + SSH connection. This example creates a + full, compressed backup of /usr and sends + the backup file to the specified host over a + SSH connection. - Using <command>dump</command> over <application>ssh</application> + Using <command>dump</command> over + <application>ssh</application> &prompt.root; /sbin/dump -0uan -f - /usr | gzip -2 | ssh -c blowfish \ targetuser@targetmachine.example.com dd of=/mybigfiles/dump-usr-l0.gz - - Or using dump's built-in method, - setting the environment variable RSH: + This example sets RSH in order to write the + backup to a tape drive on a remote system over a + SSH connection: - Using <command>dump</command> over <application>ssh</application> with <envar>RSH</envar> set - - &prompt.root; RSH=/usr/bin/ssh /sbin/dump -0uan -f targetuser@targetmachine.example.com:/dev/sa0 /usr + Using <command>dump</command> over + <application>ssh</application> with <envar>RSH</envar> + Set + &prompt.root; env RSH=/usr/bin/ssh /sbin/dump -0uan -f targetuser@targetmachine.example.com:/dev/sa0 /usr - - <command>tar</command> + Directory Backups + - backup software - tar + backup software + tar - &man.tar.1; also dates back to Version 6 of AT&T UNIX - (circa 1975). tar operates in cooperation - with the file system; it writes files and - directories to tape. tar does not support the - full range of options that are available from &man.cpio.1;, but - it does not require the unusual command - pipeline that cpio uses. + Several built-in utilities are available for backing up + and restoring specified files and directories as + needed. - tar + A good choice for making a backup of all of the files in a + directory is &man.tar.1;. This utility dates back to Version + 6 of AT&T &unix; and by default assumes a recursive backup + to a local tape device. Switches can be used to instead + specify the name of a backup file. - On FreeBSD 5.3 and later, both GNU tar - and the default bsdtar are available. The - GNU version can be invoked with gtar. It - supports remote devices using the same syntax as - rdump. To tar to an - Exabyte tape drive connected to a Sun called - komodo, use: + tar - &prompt.root; /usr/bin/gtar cf komodo:/dev/nsa8 . 2>&1 + This example creates a compressed backup of the current + directory and saves it to + /tmp/mybackup.tgz. When creating a + backup file, make sure that the backup is not saved to the + same directory that is being backed up. - The same could be accomplished with - bsdtar by using a pipeline and - rsh to send the data to a remote tape - drive. + + Backing Up the Current Directory with + <command>tar</command> - &prompt.root; tar cf - . | rsh hostname dd of=tape-device obs=20b + &prompt.root; tar czvf /tmp/mybackup.tgz . + - If you are worried about the security of backing up over a - network you should use the ssh command - instead of rsh. - + To restore the entire backup, cd into + the directory to restore into and specify the name of the + backup. Note that this will overwrite any newer versions of + files in the restore directory. When in doubt, restore to a + temporary directory or specify the name of the file within the + backup to restore. 
- - <command>cpio</command> - - backup software - cpio - + + Restoring the Current Directory with + <command>tar</command> - &man.cpio.1; is the original &unix; file interchange tape - program for magnetic media. cpio has options - (among many others) to perform byte-swapping, write a number of - different archive formats, and pipe the data to other programs. - This last feature makes cpio an excellent - choice for installation media. cpio does not - know how to walk the directory tree and a list of files must be - provided through stdin. - cpio &prompt.root; tar xzvf /tmp/mybackup.tgz + - cpio does not support backups across - the network. You can use a pipeline and rsh - to send the data to a remote tape drive. + There are dozens of available switches which are described + in &man.tar.1;. This utility also supports the use of exclude + patterns to specify which files should not be included when + backing up the specified directory or restoring files from a + backup. + + backup software + cpio + + + To create a backup using a specified list of files and + directories, &man.cpio.1; is a good choice. Unlike + tar, cpio does not know + how to walk the directory tree and it must be provided the + list of files to backup. + + For example, a list of files can be created using + ls or find. This + example creates a recursive listing of the current directory + which is then piped to cpio in order to + create an output backup file named + /tmp/mybackup.cpio. - &prompt.root; for f in directory_list; do -find $f >> backup.list -done -&prompt.root; cpio -v -o --format=newc < backup.list | ssh user@host "cat > backup_device" + + Using <command>ls</command> and <command>cpio</command> + to Make a Recursive Backup of the Current Directory - Where directory_list is the list of - directories you want to back up, - user@host is the - user/hostname combination that will be performing the backups, and - backup_device is where the backups should - be written to (e.g., /dev/nsa0). - + &prompt.root; ls -R | cpio -ovF /tmp/mybackup.cpio + - - <command>pax</command> - backup software - pax + backup software + pax pax POSIX IEEE - &man.pax.1; is IEEE/&posix;'s answer to - tar and cpio. Over the - years the various versions of tar and - cpio have gotten slightly incompatible. So - rather than fight it out to fully standardize them, &posix; - created a new archive utility. pax attempts - to read and write many of the various cpio - and tar formats, plus new formats of its own. - Its command set more resembles cpio than - tar. - - - - <application>Amanda</application> - - backup software - Amanda - - Amanda - - - Amanda (Advanced Maryland - Network Disk Archiver) is a client/server backup system, - rather than a single program. An Amanda server will backup to - a single tape drive any number of computers that have Amanda - clients and a network connection to the Amanda server. A - common problem at sites with a number of large disks is - that the length of time required to backup to data directly to tape - exceeds the amount of time available for the task. Amanda - solves this problem. Amanda can use a holding disk to - backup several file systems at the same time. Amanda creates - archive sets: a group of tapes used over a period of time to - create full backups of all the file systems listed in Amanda's - configuration file. The archive set also contains nightly - incremental (or differential) backups of all the file systems.
- Restoring a damaged file system requires the most recent full - backup and the incremental backups. - - The configuration file provides fine control of backups and the - network traffic that Amanda generates. Amanda will use any of the - above backup programs to write the data to tape. Amanda is available - as either a port or a package, it is not installed by default. - - - - Do Nothing - - Do nothing is not a computer program, but it is the - most widely used backup strategy. There are no initial costs. There - is no backup schedule to follow. Just say no. If something happens - to your data, grin and bear it! - - If your time and your data is worth little to nothing, then - Do nothing is the most suitable backup program for your - computer. But beware, &unix; is a useful tool, you may find that within - six months you have a collection of files that are valuable to - you. - - Do nothing is the correct backup method for - /usr/obj and other directory trees that can be - exactly recreated by your computer. An example is the files that - comprise the HTML or &postscript; version of this Handbook. - These document formats have been created from SGML input - files. Creating backups of the HTML or &postscript; files is - not necessary. The SGML files are backed up regularly. - - - - Which Backup Program Is Best? - - LISA - - - &man.dump.8; Period. Elizabeth D. Zwicky - torture tested all the backup programs discussed here. The clear - choice for preserving all your data and all the peculiarities of &unix; - file systems is dump. Elizabeth created file systems containing - a large variety of unusual conditions (and some not so unusual ones) - and tested each program by doing a backup and restore of those - file systems. The peculiarities included: files with holes, files with - holes and a block of nulls, files with funny characters in their - names, unreadable and unwritable files, devices, files that change - size during the backup, files that are created/deleted during the - backup and more. She presented the results at LISA V in Oct. 1991. - See torture-testing - Backup and Archive Programs. - - - - Emergency Restore Procedure - - - Before the Disaster - - There are only four steps that you need to perform in - preparation for any disaster that may occur. - - bsdlabel - - - First, print the bsdlabel from each of your disks - (e.g. bsdlabel da0 | lpr), your file system table - (/etc/fstab) and all boot messages, - two copies of - each. - - fix-it floppies - Second, determine that the boot and fix-it floppies - (boot.flp and fixit.flp) - have all your devices. The easiest way to check is to reboot your - machine with the boot floppy in the floppy drive and check the boot - messages. If all your devices are listed and functional, skip on to - step three. - - Otherwise, you have to create two custom bootable - floppies which have a kernel that can mount all of your disks - and access your tape drive. These floppies must contain: - fdisk, bsdlabel, - newfs, mount, and - whichever backup program you use. These programs must be - statically linked. If you use dump, the - floppy must contain restore. - - Third, create backup tapes regularly. Any changes that you make - after your last backup may be irretrievably lost. Write-protect the - backup tapes. - - Fourth, test the floppies (either boot.flp - and fixit.flp or the two custom bootable - floppies you made in step two.) and backup tapes. Make notes of the - procedure. 
Store these notes with the bootable floppy, the - printouts and the backup tapes. You will be so distraught when - restoring that the notes may prevent you from destroying your backup - tapes (How? In place of tar xvf /dev/sa0, you - might accidentally type tar cvf /dev/sa0 and - over-write your backup tape). - - For an added measure of security, make bootable floppies and two - backup tapes each time. Store one of each at a remote location. A - remote location is NOT the basement of the same office building. A - number of firms in the World Trade Center learned this lesson the - hard way. A remote location should be physically separated from - your computers and disk drives by a significant distance. - - - A Script for Creating a Bootable Floppy - - A backup utility which tries to bridge the features + provided by tar and cpio + is &man.pax.1;. Over the years, the various versions of + tar and cpio became + slightly incompatible. &posix; created pax + which attempts to read and write many of the various + cpio and tar formats, + plus new formats of its own. -config kernel root on da0 swap on da0 and da1 dumps on da0 + The pax equivalent to the previous + examples would be: -device isa0 -device pci0 + + Backing Up the Current Directory with + <command>pax</command> -device fdc0 at isa? port "IO_FD1" bio irq 6 drq 2 vector fdintr -device fd0 at fdc0 drive 0 + &prompt.root; pax -wf /tmp/mybackup.pax . + + -device ncr0 + + Using Data Tapes for Backups -device scbus0 + tape media -device sc0 at isa? port "IO_KBD" tty irq 1 vector scintr -device npx0 at isa? port "IO_NPX" irq 13 vector npxintr + While tape technology has continued to evolve, modern + backup systems tend to combine off-site backups with local + removable media. &os; supports any tape drive that uses + SCSI, such as LTO or + DAT. There is limited support for + SATA and USB tape + drives. -device da0 -device da1 -device da2 + For SCSI tape devices, &os; uses the + &man.sa.4; driver and the /dev/sa0, + /dev/nsa0, and + /dev/esa0 devices. The physical device + name is /dev/sa0. When + /dev/nsa0 is used, the backup application + will not rewind the tape after writing a file, which allows + writing more than one file to a tape. Using + /dev/esa0 ejects the tape after the + device is closed. + + In &os;, mt is used to control + operations of the tape drive, such as seeking through files on + a tape or writing tape control marks to the tape. For + example, the first three files on a tape can be preserved by + skipping past them before writing a new file: + + &prompt.root; mt -f /dev/nsa0 fsf 3 + + This utility supports many operations. Refer to + &man.mt.1; for details. + + To write a single file to tape using + tar, specify the name of the tape device + and the file to backup: + + &prompt.root; tar cvf /dev/sa0 file + + To recover files from a tar archive + on tape into the current directory: + + &prompt.root; tar xvf /dev/sa0 + + To backup a UFS file system, use + dump. 
This examples backs up + /usr without rewinding the tape when + finished: + + &prompt.root; dump -0aL -b64 -f /dev/nsa0 /usr + + To interactively restore files from a + dump file on tape into the current + directory: -device sa0 + &prompt.root; restore -i -f /dev/nsa0 + -pseudo-device loop # required by INET -pseudo-device gzip # Exec gzipped a.out's -EOM - exit 1 -fi + + Third-Party Backup Utilities -cp -f /sys/compile/MINI/kernel /mnt + + backup software + -gzip -c -best /sbin/init > /mnt/sbin/init -gzip -c -best /sbin/fsck > /mnt/sbin/fsck -gzip -c -best /sbin/mount > /mnt/sbin/mount -gzip -c -best /sbin/halt > /mnt/sbin/halt -gzip -c -best /sbin/restore > /mnt/sbin/restore + The &os; Ports Collection provides many third-party + utilities which can be used to schedule the creation of + backups, simplify tape backup, and make backups easier and + more convenient. Many of these applications are client/server + based and can be used to automate the backups of a single + system or all of the computers in a network. -gzip -c -best /bin/sh > /mnt/bin/sh -gzip -c -best /bin/sync > /mnt/bin/sync + Popular utilities include + Amanda, + Bacula, + rsync, and + duplicity. + -cp /root/.profile /mnt/root + + Emergency Recovery -cp -f /dev/MAKEDEV /mnt/dev -chmod 755 /mnt/dev/MAKEDEV + In addition to regular backups, it is recommended to + perform the following steps as part of an emergency + preparedness plan. -chmod 500 /mnt/sbin/init -chmod 555 /mnt/sbin/fsck /mnt/sbin/mount /mnt/sbin/halt -chmod 555 /mnt/bin/sh /mnt/bin/sync -chmod 6555 /mnt/sbin/restore + + bsdlabel -# -# create the devices nodes -# -cd /mnt/dev -./MAKEDEV std -./MAKEDEV da0 -./MAKEDEV da1 -./MAKEDEV da2 -./MAKEDEV sa0 -./MAKEDEV pty0 -cd / + Create a print copy of the output of the following + commands: -# -# create minimum file system table -# -cat > /mnt/etc/fstab <<EOM -/dev/fd0a / ufs rw 1 1 -EOM + + + gpart show + -# -# create minimum passwd file -# -cat > /mnt/etc/passwd <<EOM -root:*:0:0:Charlie &:/root:/bin/sh -EOM - -cat > /mnt/etc/master.passwd <<EOM -root::0:0::0:0:Charlie &:/root:/bin/sh -EOM - -chmod 600 /mnt/etc/master.passwd -chmod 644 /mnt/etc/passwd -/usr/sbin/pwd_mkdb -d/mnt/etc /mnt/etc/master.passwd + + more /etc/fstab + -# -# umount the floppy and inform the user -# -/sbin/umount /mnt -echo "The floppy has been unmounted and is now ready."]]> + + dmesg + + - + livefs + CD - + Store this printout and a copy of the installation media + in a secure location. Should an emergency restore be + needed, boot into the installation media and select + Live CD to access a rescue shell. This + rescue mode can be used to view the current state of the + system, and if needed, to reformat disks and restore data + from backups. - - After the Disaster + + The installation media for + &os;/&arch.i386; &rel2.current;-RELEASE does not + include a rescue shell. For this version, instead + download and burn a Livefs CD image from + ftp://ftp.FreeBSD.org/pub/FreeBSD/releases/&arch.i386;/ISO-IMAGES/&rel2.current;/&os;-&rel2.current;-RELEASE-&arch.i386;-livefs.iso. + - The key question is: did your hardware survive? You have been - doing regular backups so there is no need to worry about the - software. - - If the hardware has been damaged, the parts should be replaced - before attempting to use the computer. - - If your hardware is okay, check your floppies. If you are using - a custom boot floppy, boot single-user (type -s - at the boot: prompt). Skip the following - paragraph. 
- - If you are using the boot.flp and - fixit.flp floppies, keep reading. Insert the - boot.flp floppy in the first floppy drive and - boot the computer. The original install menu will be displayed on - the screen. Select the Fixit--Repair mode with CDROM or - floppy. option. Insert the - fixit.flp when prompted. - restore and the other programs that you need are - located in /mnt2/rescue - (/mnt2/stand for - &os; versions older than 5.2). - - Recover each file system separately. - - - mount - - root partition - - bsdlabel - - - newfs - - Try to mount (e.g. mount /dev/da0a - /mnt) the root partition of your first disk. If the - bsdlabel was damaged, use bsdlabel to re-partition and - label the disk to match the label that you printed and saved. Use - newfs to re-create the file systems. Re-mount the root - partition of the floppy read-write (mount -u -o rw - /mnt). Use your backup program and backup tapes to - recover the data for this file system (e.g. restore vrf - /dev/sa0). Unmount the file system (e.g. umount - /mnt). Repeat for each file system that was - damaged. - - Once your system is running, backup your data onto new tapes. - Whatever caused the crash or data loss may strike again. Another - hour spent now may save you from further distress later. - + Next, test the rescue shell and the backups. Make notes + of the procedure. Store these notes with the media, the + printouts, and the backups. These notes may prevent the + inadvertent destruction of the backups while under the stress + of performing an emergency recovery. + + For an added measure of security, store the latest backup + at a remote location which is physically separated from the + computers and disk drives by a significant distance. - Network, Memory, and File-Backed File Systems + + Memory Disks + - MarcFonvieilleReorganized and enhanced by + + + Marc + Fonvieille + + Reorganized and enhanced by + - - virtual disks - - disks - virtual - - Aside from the disks you physically insert into your computer: - floppies, CDs, hard drives, and so forth; other forms of disks - are understood by FreeBSD - the virtual - disks. + In addition to physical disks, &os; also supports the + creation and use of memory disks. One possible use for a + memory disk is to access the contents of an + ISO file system without the overhead of first + burning it to a CD or DVD, + then mounting the CD/DVD media. + + In &os;, the &man.md.4; driver is used to provide support + for memory disks. The GENERIC kernel + includes this driver. When using a custom kernel configuration + file, ensure it includes this line: - NFS - Coda - - disks - memory - - These include network file systems such as the Network File System and Coda, memory-based - file systems and - file-backed file systems. - - According to the FreeBSD version you run, you will have to use - different tools for creation and use of file-backed and - memory-based file systems. - - - Use &man.devfs.5; to allocate device nodes transparently for the - user. - + device md - File-Backed File System + Attaching and Detaching Existing Images + - disks - file-backed + disks + memory - The utility &man.mdconfig.8; is used to configure and enable - memory disks, &man.md.4;, under FreeBSD. To use - &man.mdconfig.8;, you have to load &man.md.4; module or to add - the support in your kernel configuration file: - - device md + To mount an existing file system image, use + mdconfig to specify the name of the + ISO file and a free unit number. 
Then, + refer to that unit number to mount it on an existing mount + point. Once mounted, the files in the ISO + will appear in the mount point. This example attaches + diskimage.iso to the memory device + /dev/md0 then mounts that memory device + on /mnt: - The &man.mdconfig.8; command supports three kinds of - memory backed virtual disks: memory disks allocated with - &man.malloc.9;, memory disks using a file or swap space as - backing. One possible use is the mounting of floppy - or CD images kept in files. + &prompt.root; mdconfig -f diskimage.iso -u 0 +&prompt.root; mount /dev/md0 /mnt - To mount an existing file system image: - - - Using <command>mdconfig</command> to Mount an Existing File System - Image + If a unit number is not specified with + , mdconfig will + automatically allocate an unused memory device and output + the name of the allocated unit, such as + md4. Refer to &man.mdconfig.8; for more + details about this command and its options. - &prompt.root; mdconfig -a -t vnode -f diskimage -u 0 -&prompt.root; mount /dev/md0 /mnt - - - To create a new file system image with &man.mdconfig.8;: - - - Creating a New File-Backed Disk with <command>mdconfig</command> - - &prompt.root; dd if=/dev/zero of=newimage bs=1k count=5k -5120+0 records in -5120+0 records out -&prompt.root; mdconfig -a -t vnode -f newimage -u 0 -&prompt.root; bsdlabel -w md0 auto -&prompt.root; newfs md0a -/dev/md0a: 5.0MB (10224 sectors) block size 16384, fragment size 2048 - using 4 cylinder groups of 1.25MB, 80 blks, 192 inodes. -super-block backups (for fsck -b #) at: - 160, 2720, 5280, 7840 -&prompt.root; mount /dev/md0a /mnt -&prompt.root; df /mnt -Filesystem 1K-blocks Used Avail Capacity Mounted on -/dev/md0a 4710 4 4330 0% /mnt - - - If you do not specify the unit number with the - option, &man.mdconfig.8; will use the - &man.md.4; automatic allocation to select an unused device. - The name of the allocated unit will be output on stdout like - md4. For more details about - &man.mdconfig.8;, please refer to the manual page. - - The utility &man.mdconfig.8; is very useful, however it - asks many command lines to create a file-backed file system. - FreeBSD also comes with a tool called &man.mdmfs.8;, - this program configures a &man.md.4; disk using - &man.mdconfig.8;, puts a UFS file system on it using - &man.newfs.8;, and mounts it using &man.mount.8;. For example, - if you want to create and mount the same file system image as - above, simply type the following: - - - Configure and Mount a File-Backed Disk with <command>mdmfs</command> - &prompt.root; dd if=/dev/zero of=newimage bs=1k count=5k -5120+0 records in -5120+0 records out -&prompt.root; mdmfs -F newimage -s 5m md0 /mnt -&prompt.root; df /mnt -Filesystem 1K-blocks Used Avail Capacity Mounted on -/dev/md0 4718 4 4338 0% /mnt - - - If you use the option without unit - number, &man.mdmfs.8; will use &man.md.4; auto-unit feature to - automatically select an unused device. For more details - about &man.mdmfs.8;, please refer to the manual page. - - - - - Memory-Based File System - disks - memory file system + disks + detaching a memory disk - For a - memory-based file system the swap backing - should normally be used. Using swap backing does not mean - that the memory disk will be swapped out to disk by default, - but merely that the memory disk will be allocated from a - memory pool which can be swapped out to disk if needed. 
It is - also possible to create memory-based disk which are - &man.malloc.9; backed, but using malloc backed memory disks, - especially large ones, can result in a system panic if the - kernel runs out of memory. - - - Creating a New Memory-Based Disk with - <command>mdconfig</command> - - &prompt.root; mdconfig -a -t malloc -s 5m -u 1 -&prompt.root; newfs -U md1 -/dev/md1: 5.0MB (10240 sectors) block size 16384, fragment size 2048 - using 4 cylinder groups of 1.27MB, 81 blks, 256 inodes. - with soft updates -super-block backups (for fsck -b #) at: - 32, 2624, 5216, 7808 -&prompt.root; mount /dev/md1 /mnt -&prompt.root; df /mnt -Filesystem 1K-blocks Used Avail Capacity Mounted on -/dev/md1 4846 2 4458 0% /mnt - + When a memory disk is no longer in use, its resources + should be released back to the system. First, unmount the + file system, then use mdconfig to detach + the disk from the system and release its resources. To + continue this example: - - Creating a New Memory-Based Disk with - <command>mdmfs</command> - &prompt.root; mdmfs -M -s 5m md2 /mnt -&prompt.root; df /mnt -Filesystem 1K-blocks Used Avail Capacity Mounted on -/dev/md2 4846 2 4458 0% /mnt - + &prompt.root; umount /mnt +&prompt.root; mdconfig -d -u 0 - Instead of using a &man.malloc.9; backed file system, it is - possible to use swap, for that just replace - with in the - command line of &man.mdconfig.8;. The &man.mdmfs.8; utility - by default (without ) creates a swap-based - disk. For more details, please refer to &man.mdconfig.8; - and &man.mdmfs.8; manual pages. + To determine if any memory disks are still attached to the + system, type mdconfig -l. - - Detaching a Memory Disk from the System + + Creating a File- or Memory-Backed Memory Disk + - disks - detaching a memory disk + disks + memory file system + &os; also supports memory disks where the storage to use + is allocated from either a hard disk or an area of memory. + The first method is commonly referred to as a file-backed file + system and the second method as a memory-backed file system. + Both types can be created using + mdconfig. + + To create a new memory-backed file system, specify a type + of swap and the size of the memory disk to + create. Then, format the memory disk with a file system and + mount as usual. This example creates a 5M memory disk on unit + 1. That memory disk is then formatted with + the UFS file system before it is + mounted: - When a memory-based or file-based file system - is not used, you should release all resources to the system. - The first thing to do is to unmount the file system, then use - &man.mdconfig.8; to detach the disk from the system and release - the resources. + &prompt.root; mdconfig -a -t swap -s 5m -u 1 +&prompt.root; newfs -U md1 +/dev/md1: 5.0MB (10240 sectors) block size 16384, fragment size 2048 + using 4 cylinder groups of 1.27MB, 81 blks, 192 inodes. + with soft updates +super-block backups (for fsck -b #) at: + 160, 2752, 5344, 7936 +&prompt.root; mount /dev/md1 /mnt +&prompt.root; df /mnt +Filesystem 1K-blocks Used Avail Capacity Mounted on +/dev/md1 4718 4 4338 0% /mnt - For example to detach and free all resources used by - /dev/md4: + To create a new file-backed memory disk, first allocate an + area of disk to use. 
This example creates an empty 5K file + named newimage: - &prompt.root; mdconfig -d -u 4 + &prompt.root; dd if=/dev/zero of=newimage bs=1k count=5k +5120+0 records in +5120+0 records out - It is possible to list information about configured - &man.md.4; devices in using the command mdconfig - -l. + Next, attach that file to a memory disk, label the memory + disk and format it with the UFS file + system, mount the memory disk, and verify the size of the + file-backed disk: + + &prompt.root; mdconfig -f newimage -u 0 +&prompt.root; bsdlabel -w md0 auto +&prompt.root; newfs md0a +/dev/md0a: 5.0MB (10224 sectors) block size 16384, fragment size 2048 + using 4 cylinder groups of 1.25MB, 80 blks, 192 inodes. +super-block backups (for fsck -b #) at: + 160, 2720, 5280, 7840 +&prompt.root; mount /dev/md0a /mnt +&prompt.root; df /mnt +Filesystem 1K-blocks Used Avail Capacity Mounted on +/dev/md0a 4710 4 4330 0% /mnt + It takes several commands to create a file- or + memory-backed file system using mdconfig. + &os; also comes with mdmfs which + automatically configures a memory disk, formats it with the + UFS file system, and mounts it. For + example, after creating newimage + with dd, this one command is equivalent to + running the bsdlabel, + newfs, and mount + commands shown above: + + &prompt.root; mdmfs -F newimage -s 5m md0 /mnt + + To instead create a new memory-based memory disk with + mdmfs, use this one command: + + &prompt.root; mdmfs -s 5m md1 /mnt + + If the unit number is not specified, + mdmfs will automatically select an unused + memory device. For more details about + mdmfs, refer to &man.mdmfs.8;. - File System Snapshots + + File System Snapshots + - TomRhodesContributed by + + + Tom + Rhodes + + Contributed by + - - - file systems snapshots - FreeBSD offers a feature in conjunction with - Soft Updates: File system snapshots. + &os; offers a feature in conjunction with + Soft Updates: file system + snapshots. + + UFS snapshots allow a user to create images of specified + file systems, and treat them as a file. Snapshot files must be + created in the file system that the action is performed on, and + a user may create no more than 20 snapshots per file system. + Active snapshots are recorded in the superblock so they are + persistent across unmount and remount operations along with + system reboots. When a snapshot is no longer required, it can + be removed using &man.rm.1;. While snapshots may be removed in + any order, all the used space may not be acquired because + another snapshot will possibly claim some of the released + blocks. - Snapshots allow a user to create images of specified file - systems, and treat them as a file. - Snapshot files must be created in the file system that the - action is performed on, and a user may create no more than 20 - snapshots per file system. Active snapshots are recorded - in the superblock so they are persistent across unmount and - remount operations along with system reboots. When a snapshot - is no longer required, it can be removed with the standard &man.rm.1; - command. Snapshots may be removed in any order, - however all the used space may not be acquired because another snapshot will - possibly claim some of the released blocks. - - The un-alterable file flag is set + The un-alterable file flag is set by &man.mksnap.ffs.8; after initial creation of a snapshot file. - The &man.unlink.1; command makes an exception for snapshot files - since it allows them to be removed. 
+ &man.unlink.1; makes an exception for snapshot files since it + allows them to be removed. - Snapshots are created with the &man.mount.8; command. To place - a snapshot of /var in the file - /var/snapshot/snap use the following - command: + Snapshots are created using &man.mount.8;. To place a + snapshot of /var in the + file /var/snapshot/snap, use the following + command: -&prompt.root; mount -u -o snapshot /var/snapshot/snap /var + &prompt.root; mount -u -o snapshot /var/snapshot/snap /var - Alternatively, you can use &man.mksnap.ffs.8; to create - a snapshot: -&prompt.root; mksnap_ffs /var /var/snapshot/snap - - One can find snapshot files on a file system (e.g. /var) - by using the &man.find.1; command: -&prompt.root; find /var -flags snapshot + Alternatively, use &man.mksnap.ffs.8; to create the + snapshot: - Once a snapshot has been created, it has several - uses: + &prompt.root; mksnap_ffs /var /var/snapshot/snap - - - Some administrators will use a snapshot file for backup purposes, - because the snapshot can be transfered to CDs or tape. - + One can find snapshot files on a file system, such as + /var, using + &man.find.1;: - - File integrity, &man.fsck.8; may be ran on the snapshot. - Assuming that the file system was clean when it was mounted, you - should always get a clean (and unchanging) result. - This is essentially what the - background &man.fsck.8; process does. - + &prompt.root; find /var -flags snapshot - - Run the &man.dump.8; utility on the snapshot. - A dump will be returned that is consistent with the - file system and the timestamp of the snapshot. &man.dump.8; - can also take a snapshot, create a dump image and then - remove the snapshot in one command using the - flag. - + Once a snapshot has been created, it has several + uses: - - &man.mount.8; the snapshot as a frozen image of the file system. - To &man.mount.8; the snapshot - /var/snapshot/snap run: + + + Some administrators will use a snapshot file for backup + purposes, because the snapshot can be transferred to + CDs or tape. + -&prompt.root; mdconfig -a -t vnode -f /var/snapshot/snap -u 4 -&prompt.root; mount -r /dev/md4 /mnt + + The file system integrity checker, &man.fsck.8;, may be + run on the snapshot. Assuming that the file system was + clean when it was mounted, this should always provide a + clean and unchanging result. + - - + + Running &man.dump.8; on the snapshot will produce a dump + file that is consistent with the file system and the + timestamp of the snapshot. &man.dump.8; can also take a + snapshot, create a dump image, and then remove the snapshot + in one command by using . + + + + The snapshot can be mounted as a frozen image of the + file system. To &man.mount.8; the snapshot + /var/snapshot/snap run: + + &prompt.root; mdconfig -a -t vnode -o readonly -f /var/snapshot/snap -u 4 +&prompt.root; mount -r /dev/md4 /mnt + + - You can now walk the hierarchy of your frozen /var - file system mounted at /mnt. Everything will - initially be in the same state it was during the snapshot creation time. - The only exception is that any earlier snapshots will appear - as zero length files. When the use of a snapshot has delimited, - it can be unmounted with: + The frozen /var is now available + through /mnt. Everything will initially be + in the same state it was during the snapshot creation time. The + only exception is that any earlier snapshots will appear as zero + length files. 
To unmount the snapshot, use: -&prompt.root; umount /mnt + &prompt.root; umount /mnt &prompt.root; mdconfig -d -u 4 - For more information about and - file system snapshots, including technical papers, you can visit - Marshall Kirk McKusick's website at - http://www.mckusick.com/. + For more information about soft updates and + file system snapshots, including technical papers, visit + Marshall Kirk McKusick's website at http://www.mckusick.com/. @@ -3181,92 +2440,109 @@ - - Encrypting Disk Partitions + + Encrypting Disk Partitions + - LuckyGreenContributed by -
shamrock@cypherpunks.to
-
+ + + Lucky + Green + + Contributed by + +
+ shamrock@cypherpunks.to +
+
+
-
- disks - encrypting + encrypting + - FreeBSD offers excellent online protections against - unauthorized data access. File permissions and Mandatory - Access Control (MAC) (see ) help prevent - unauthorized third-parties from accessing data while the operating - system is active and the computer is powered up. However, - the permissions enforced by the operating system are irrelevant if an - attacker has physical access to a computer and can simply move - the computer's hard drive to another system to copy and analyze - the sensitive data. - - Regardless of how an attacker may have come into possession of - a hard drive or powered-down computer, both GEOM - Based Disk Encryption (gbde) and - geli cryptographic subsystems in &os; are able - to protect the data on the computer's file systems against even - highly-motivated attackers with significant resources. Unlike - cumbersome encryption methods that encrypt only individual files, - gbde and geli transparently - encrypt entire file systems. No cleartext ever touches the hard + &os; offers excellent online protections against + unauthorized data access. File permissions and Mandatory Access Control (MAC) help + prevent unauthorized users from accessing data while the + operating system is active and the computer is powered up. + However, the permissions enforced by the operating system are + irrelevant if an attacker has physical access to a computer and + can move the computer's hard drive to another system to copy and + analyze the data. + + Regardless of how an attacker may have come into possession + of a hard drive or powered-down computer, the + GEOM-based cryptographic subsystems built + into &os; are able to protect the data on the computer's file + systems against even highly-motivated attackers with significant + resources. Unlike encryption methods that encrypt individual + files, the built-in gbde and + geli utilities can be used to transparently + encrypt entire file systems. No cleartext ever touches the hard drive's platter. - - Disk Encryption with <application>gbde</application> - - - - Become <systemitem class="username">root</systemitem> - - Configuring gbde requires - super-user privileges. - - &prompt.user; su - -Password: - - - - Add &man.gbde.4; Support to the Kernel Configuration File - - Add the following line to the kernel configuration - file: + This chapter demonstrates how to create an encrypted file + system on &os;. It first demonstrates the process using + gbde and then demonstrates the same example + using geli. + + + Disk Encryption with + <application>gbde</application> + + The objective of the &man.gbde.4; facility is to provide a + formidable challenge for an attacker to gain access to the + contents of a cold storage device. + However, if the computer is compromised while up and running + and the storage device is actively attached, or the attacker + has access to a valid passphrase, it offers no protection to + the contents of the storage device. Thus, it is important to + provide physical security while the system is running and to + protect the passphrase used by the encryption + mechanism. + + This facility provides several barriers to protect the + data stored in each disk sector. It encrypts the contents of + a disk sector using 128-bit AES in + CBC mode. Each sector on the disk is + encrypted with a different AES key. For + more information on the cryptographic design, including how + the sector keys are derived from the user-supplied passphrase, + refer to &man.gbde.4;. 
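+
+ Before an encrypted partition can be configured, gbde
+ support must be available in the kernel, either compiled in or
+ loaded as a module. To load the module automatically at every
+ boot, this line can be added to /boot/loader.conf:
+
+geom_bde_load="YES"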
- options GEOM_BDE + &os; provides a kernel module for + gbde which can be loaded with this + command: - Rebuild the kernel as described in . + &prompt.root; kldload geom_bde - Reboot into the new kernel. - - + If using a custom kernel configuration file, ensure it + contains this line: - - Preparing the Encrypted Hard Drive + options GEOM_BDE - The following example assumes that you are adding a new hard - drive to your system that will hold a single encrypted partition. - This partition will be mounted as /private. - gbde can also be used to encrypt - /home and /var/mail, but - this requires more complex instructions which exceed the scope of - this introduction. + The following example demonstrates adding a new hard drive + to a system that will hold a single encrypted partition that + will be mounted as /private. + Encrypting a Partition with + <application>gbde</application> + Add the New Hard Drive - Install the new drive to the system as explained in . For the purposes of this example, - a new hard drive partition has been added as - /dev/ad4s1c. The - /dev/ad0s1* - devices represent existing standard FreeBSD partitions on - the example system. + Install the new drive to the system as explained in + . For the purposes of this + example, a new hard drive partition has been added as + /dev/ad4s1c and + /dev/ad0s1* + represents the existing standard &os; partitions. &prompt.root; ls /dev/ad* /dev/ad0 /dev/ad0s1b /dev/ad0s1e /dev/ad4s1 @@ -3275,81 +2551,78 @@ - Create a Directory to Hold gbde Lock Files + Create a Directory to Hold <command>gbde</command> + Lock Files &prompt.root; mkdir /etc/gbde - The gbde lock file contains - information that gbde requires to - access encrypted partitions. Without access to the lock file, - gbde will not be able to decrypt - the data contained in the encrypted partition without - significant manual intervention which is not supported by the - software. Each encrypted partition uses a separate lock - file. + The gbde lock file + contains information that gbde + requires to access encrypted partitions. Without access + to the lock file, gbde will not + be able to decrypt the data contained in the encrypted + partition without significant manual intervention which is + not supported by the software. Each encrypted partition + uses a separate lock file. - Initialize the gbde Partition + Initialize the <command>gbde</command> + Partition A gbde partition must be - initialized before it can be used. This initialization needs to - be performed only once: - - &prompt.root; gbde init /dev/ad4s1c -i -L /etc/gbde/ad4s1c - - &man.gbde.8; will open your editor, permitting you to set - various configuration options in a template. For use with UFS1 - or UFS2, set the sector_size to 2048: + initialized before it can be used. This initialization + needs to be performed only once. This command will open + the default editor, in order to set various configuration + options in a template. For use with the + UFS file system, set the sector_size to + 2048: - $FreeBSD: src/sbin/gbde/template.txt,v 1.1 2002/10/20 11:16:13 phk Exp $ + &prompt.root; gbde init /dev/ad4s1c -i -L /etc/gbde/ad4s1c.lock# $FreeBSD: src/sbin/gbde/template.txt,v 1.1.36.1 2009/08/03 08:13:06 kensmith Exp $ # # Sector size is the smallest unit of data which can be read or written. # Making it too small decreases performance and decreases available space. # Making it too large may prevent filesystems from working. 512 is the # minimum and always safe. 
For UFS, use the fragment size # -sector_size = 2048 -[...] - - - &man.gbde.8; will ask you twice to type the passphrase that - should be used to secure the data. The passphrase must be the - same both times. gbde's ability to - protect your data depends entirely on the quality of the - passphrase that you choose. - - For tips on how to select a secure passphrase that is easy - to remember, see the Diceware - Passphrase website. - - The gbde init command creates a lock - file for your gbde partition that in - this example is stored as - /etc/gbde/ad4s1c. +sector_size = 2048 +[...] + + Once the edit is saved, the user will be asked twice + to type the passphrase used to secure the data. The + passphrase must be the same both times. The ability of + gbde to protect data depends + entirely on the quality of the passphrase. For tips on + how to select a secure passphrase that is easy to + remember, see http://world.std.com/~reinhold/diceware.htm. + + This initialization creates a lock file for the + gbde partition. In this + example, it is stored as + /etc/gbde/ad4s1c.lock. Lock files + must end in .lock in order to be correctly + detected by the /etc/rc.d/gbde start + up script. - gbde lock files - must be backed up together with the - contents of any encrypted partitions. While deleting a lock - file alone cannot prevent a determined attacker from - decrypting a gbde partition, - without the lock file, the legitimate owner will be unable - to access the data on the encrypted partition without a - significant amount of work that is totally unsupported by - &man.gbde.8; and its designer. + Lock files must be backed up + together with the contents of any encrypted partitions. + Without the lock file, the legitimate owner will be + unable to access the data on the encrypted + partition. - Attach the Encrypted Partition to the Kernel + Attach the Encrypted Partition to the + Kernel - &prompt.root; gbde attach /dev/ad4s1c -l /etc/gbde/ad4s1c + &prompt.root; gbde attach /dev/ad4s1c -l /etc/gbde/ad4s1c.lock - You will be asked to provide the passphrase that you - selected during the initialization of the encrypted partition. - The new encrypted device will show up in + This command will prompt to input the passphrase that + was selected during the initialization of the encrypted + partition. The new encrypted device will appear in /dev as /dev/device_name.bde: @@ -3360,43 +2633,36 @@ - Create a File System on the Encrypted Device + Create a File System on the Encrypted + Device - Once the encrypted device has been attached to the kernel, - you can create a file system on the device. To create a file - system on the encrypted device, use &man.newfs.8;. Since it is - much faster to initialize a new UFS2 file system than it is to - initialize the old UFS1 file system, using &man.newfs.8; with - the option is recommended. - - &prompt.root; newfs -U -O2 /dev/ad4s1c.bde - - - The &man.newfs.8; command must be performed on an - attached gbde partition which - is identified by a - *.bde - extension to the device name. - + Once the encrypted device has been attached to the + kernel, a file system can be created on the device. This + example creates a UFS file system with + soft updates enabled. Be sure to specify the partition + which has a + *.bde + extension: + + &prompt.root; newfs -U /dev/ad4s1c.bde Mount the Encrypted Partition - Create a mount point for the encrypted file system. 
- - &prompt.root; mkdir /private + Create a mount point and mount the encrypted file + system: - Mount the encrypted file system. - - &prompt.root; mount /dev/ad4s1c.bde /private + &prompt.root; mkdir /private +&prompt.root; mount /dev/ad4s1c.bde /private - Verify That the Encrypted File System is Available + Verify That the Encrypted File System is + Available - The encrypted file system should now be visible to - &man.df.1; and be available for use. + The encrypted file system should now be visible and + available for use: &prompt.user; df -H Filesystem Size Used Avail Capacity Mounted on @@ -3408,251 +2674,195 @@ /dev/ad4s1c.bde 150G 4.1K 138G 0% /private - - - - Mounting Existing Encrypted File Systems After each boot, any encrypted file systems must be - re-attached to the kernel, checked for errors, and mounted, before - the file systems can be used. The required commands must be - executed as user root. - - - - Attach the gbde Partition to the Kernel - - &prompt.root; gbde attach /dev/ad4s1c -l /etc/gbde/ad4s1c - - You will be asked to provide the passphrase that you - selected during initialization of the encrypted - gbde partition. - - - - Check the File System for Errors - - Since encrypted file systems cannot yet be listed in - /etc/fstab for automatic mounting, the - file systems must be checked for errors by running &man.fsck.8; - manually before mounting. - - &prompt.root; fsck -p -t ffs /dev/ad4s1c.bde - - - - Mount the Encrypted File System - - &prompt.root; mount /dev/ad4s1c.bde /private - - The encrypted file system is now available for use. - - - - - Automatically Mounting Encrypted Partitions + manually re-attached to the kernel, checked for errors, and + mounted, before the file systems can be used. To configure + these steps, add the following lines to + /etc/rc.conf: + + gbde_autoattach_all="YES" +gbde_devices="ad4s1c" +gbde_lockdir="/etc/gbde" + + This requires that the passphrase be entered at the + console at boot time. After typing the correct passphrase, + the encrypted partition will be mounted automatically. + Additional gbde boot options are + available and listed in &man.rc.conf.5;. - It is possible to create a script to automatically attach, - check, and mount an encrypted partition, but for security reasons - the script should not contain the &man.gbde.8; password. Instead, - it is recommended that such scripts be run manually while - providing the password via the console or &man.ssh.1;. - - As of &os; 5.2-RELEASE, there is a new rc.d script - provided. Arguments for this script can be passed via - &man.rc.conf.5;, for example: - - gbde_autoattach_all="YES" -gbde_devices="ad4s1c" - - This will require that the gbde - passphrase be entered at boot time. After typing the correct - passphrase, the gbde encrypted - partition will be mounted automatically. This can be very - useful when using gbde on - notebooks. - - - - - Cryptographic Protections Employed by gbde - - &man.gbde.8; encrypts the sector payload using 128-bit AES in - CBC mode. Each sector on the disk is encrypted with a different - AES key. For more information on gbde's - cryptographic design, including how the sector keys are derived - from the user-supplied passphrase, see &man.gbde.4;. - - - - Compatibility Issues - - &man.sysinstall.8; is incompatible with - gbde-encrypted devices. All + + + sysinstall is incompatible + with gbde-encrypted devices. 
All *.bde devices must be detached from the - kernel before starting &man.sysinstall.8; or it will crash during - its initial probing for devices. To detach the encrypted device - used in our example, use the following command: - &prompt.root; gbde detach /dev/ad4s1c - - Also note that, as &man.vinum.4; does not use the - &man.geom.4; subsystem, you cannot use - gbde with - vinum volumes. - + kernel before starting sysinstall + or it will crash during its initial probing for devices. To + detach the encrypted device used in the example, use the + following command: + &prompt.root; gbde detach /dev/ad4s1c + - - Disk Encryption with <command>geli</command> + + + Disk Encryption with <command>geli</command> + - DanielGerzoContributed by + + + Daniel + Gerzo + + Contributed by + - - - - A new cryptographic GEOM class is available as of &os; 6.0 - - geli. It is currently being developed by - &a.pjd;. Geli is different to - gbde; it offers different features and uses - a different scheme for doing cryptographic work. - - The most important features of &man.geli.8; are: + An alternative cryptographic GEOM class + is available using geli. This control + utility adds some features and uses a different scheme for + doing cryptographic work. It provides the following + features: - Utilizes the &man.crypto.9; framework — when - cryptographic hardware is available, geli - will use it automatically. + Utilizes the &man.crypto.9; framework and + automatically uses cryptographic hardware when it is + available. + - Supports multiple cryptographic algorithms (currently - AES, Blowfish, and 3DES). + Supports multiple cryptographic algorithms such as + AES, Blowfish, and + 3DES. + Allows the root partition to be encrypted. The - passphrase used to access the encrypted root partition will - be requested during the system boot. + passphrase used to access the encrypted root partition + will be requested during system boot. + - Allows the use of two independent keys (e.g. a - key and a company key). + Allows the use of two independent keys. + - geli is fast - performs simple - sector-to-sector encryption. + It is fast as it performs simple sector-to-sector + encryption. + - Allows backup and restore of Master Keys. When a user - has to destroy his keys, it will be possible to get access - to the data again by restoring keys from the backup. + Allows backup and restore of master keys. If a user + destroys their keys, it is still possible to get access to + the data by restoring keys from the backup. + - Allows to attach a disk with a random, one-time key - — useful for swap partitions and temporary file + Allows a disk to attach with a random, one-time key + which is useful for swap partitions and temporary file systems. - More geli features can be found in the - &man.geli.8; manual page. + More features and usage examples can be found in + &man.geli.8;. - The next steps will describe how to enable support for - geli in the &os; kernel and will explain how - to create a new geli encryption provider. At - the end it will be demonstrated how to create an encrypted swap - partition using features provided by geli. - - In order to use geli, you must be running - &os; 6.0-RELEASE or later. Super-user privileges will be - required since modifications to the kernel are necessary. + The following example describes how to generate a key file + which will be used as part of the master key for the encrypted + provider mounted under /private. The key + file will provide some random data used to encrypt the master + key. 
The master key will also be protected by a passphrase. + The provider's sector size will be 4kB. The example describes + how to attach to the geli provider, create + a file system on it, mount it, work with it, and finally, how + to detach it. + Encrypting a Partition with + <command>geli</command> + - Adding <command>geli</command> Support to the Kernel - Configuration File + Load <command>geli</command> Support - Add the following lines to the kernel configuration - file: + Support for geli is available as a + loadable kernel module. To configure the system to + automatically load the module at boot time, add the + following line to + /boot/loader.conf: - options GEOM_ELI -device crypto + geom_eli_load="YES" - Rebuild the kernel as described in . + To load the kernel module now: - Alternatively, the geli module can - be loaded at boot time. Add the following line to the - /boot/loader.conf: + &prompt.root; kldload geom_eli - geom_eli_load="YES" + For a custom kernel, ensure the kernel configuration + file contains these lines: - &man.geli.8; should now be supported by the kernel. + options GEOM_ELI +device crypto - Generating the Master Key + Generate the Master Key - The following example will describe how to generate a - key file, which will be used as part of the Master Key for - the encrypted provider mounted under - /private. The key - file will provide some random data used to encrypt the - Master Key. The Master Key will be protected by a - passphrase as well. Provider's sector size will be 4kB big. - Furthermore, the discussion will describe how to attach the - geli provider, create a file system on - it, how to mount it, how to work with it, and finally how to - detach it. - - It is recommended to use a bigger sector size (like 4kB) for - better performance. - - The Master Key will be protected with a passphrase and - the data source for key file will be - /dev/random. The sector size of - /dev/da2.eli, which we call provider, - will be 4kB. + The following commands generate a master key + (/root/da2.key) that is protected + with a passphrase. The data source for the key file is + /dev/random and the sector size of + the provider (/dev/da2.eli) is 4kB as + a bigger sector size provides better performance: &prompt.root; dd if=/dev/random of=/root/da2.key bs=64 count=1 &prompt.root; geli init -s 4096 -K /root/da2.key /dev/da2 Enter new passphrase: Reenter new passphrase: - It is not mandatory that both a passphrase and a key - file are used; either method of securing the Master Key can - be used in isolation. - - If key file is given as -, standard - input will be used. This example shows how more than one - key file can be used. + It is not mandatory to use both a passphrase and a key + file as either method of securing the master key can be + used in isolation. + + If the key file is given as -, standard + input will be used. For example, this command generates + three key files: &prompt.root; cat keyfile1 keyfile2 keyfile3 | geli init -K - /dev/da2 - Attaching the Provider with the generated Key + Attach the Provider with the Generated Key + + To attach the provider, specify the key file, the name + of the disk, and the passphrase: &prompt.root; geli attach -k /root/da2.key /dev/da2 Enter passphrase: - The new plaintext device will be named - /dev/da2.eli. 
+ This creates a new device with an + .eli extension: &prompt.root; ls /dev/da2* /dev/da2 /dev/da2.eli - Creating the new File System + Create the New File System + + Next, format the device with the + UFS file system and mount it on an + existing mount point: &prompt.root; dd if=/dev/random of=/dev/da2.eli bs=1m &prompt.root; newfs /dev/da2.eli -&prompt.root; mount /dev/da2.eli /private +&prompt.root; mount /dev/da2.eli /private - The encrypted file system should be visible to &man.df.1; - and be available for use now. + The encrypted file system should now be available for + use: &prompt.root; df -H Filesystem Size Used Avail Capacity Mounted on @@ -3662,188 +2872,767 @@ /dev/ad0s1d 989M 1.5M 909M 0% /tmp /dev/ad0s1e 3.9G 1.3G 2.3G 35% /var /dev/da2.eli 150G 4.1K 138G 0% /private - - - - - Unmounting and Detaching the Provider - - Once the work on the encrypted partition is done, and - the /private partition - is no longer needed, it is prudent to consider unmounting - and detaching the geli encrypted - partition from the kernel. - - &prompt.root; umount /private -&prompt.root; geli detach da2.eli - More information about the use of &man.geli.8; can be - found in the manual page. - - - Encrypting a Swap Partition - - The following example demonstrates how to create a - geli encrypted swap partition. - - &prompt.root; dd if=/dev/random of=/dev/ad0s1b bs=1m -&prompt.root; geli onetime -d -a 3des ad0s1b -&prompt.root; swapon /dev/ad0s1b.eli - + Once the work on the encrypted partition is done, and the + /private partition is no longer needed, + it is prudent to put the device into cold storage by + unmounting and detaching the geli encrypted + partition from the kernel: - - Using the <filename>geli</filename> <filename>rc.d</filename> Script + &prompt.root; umount /private +&prompt.root; geli detach da2.eli - geli comes with a rc.d script which - can be used to simplify the usage of geli. - An example of configuring geli through - &man.rc.conf.5; follows: - - geli_devices="da2" -geli_da2_flags="-p -k /root/da2.key" - - This will configure /dev/da2 as a - geli provider of which the Master Key file - is located in /root/da2.key, and - geli will not use a passphrase when - attaching the provider (note that this can only be used if -P - was given during the geli init phase). The - system will detach the geli provider from - the kernel before the system shuts down. - - More information about configuring rc.d is provided in the - rc.d section of the - Handbook. - + A rc.d script is provided to + simplify the mounting of geli-encrypted + devices at boot time. For this example, add these lines to + /etc/rc.conf: + + geli_devices="da2" +geli_da2_flags="-p -k /root/da2.key" + + This configures /dev/da2 as a + geli provider with a master key of + /root/da2.key. The system will + automatically detach the provider from the kernel before the + system shuts down. During the startup process, the script + will prompt for the passphrase before attaching the provider. + Other kernel messages might be shown before and after the + password prompt. If the boot process seems to stall, look + carefully for the password prompt among the other messages. + Once the correct passphrase is entered, the provider is + attached. The file system is then mounted, typically by an + entry in /etc/fstab. Refer to for instructions on how to + configure a file system to mount at boot time.
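+
+      As a minimal sketch, and assuming the provider and mount
+      point used in this example, the matching
+      /etc/fstab entry might look like
+      this:
+
+      /dev/da2.eli		/private	ufs	rw	2	2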
- - Encrypting Swap Space + + Encrypting Swap + - ChristianBrüfferWritten by + + + Christian + Brüffer + + Written by + - swap encrypting - Swap encryption in &os; is easy to configure and has been - available since &os; 5.3-RELEASE. Depending on which version - of &os; is being used, different options are available - and configuration can vary slightly. From &os; 6.0-RELEASE onwards, - the &man.gbde.8; or &man.geli.8; encryption systems can be used - for swap encryption. With earlier versions, only &man.gbde.8; is - available. Both systems use the encswap - rc.d script. - - The previous section, Encrypting - Disk Partitions, includes a short discussion on the different - encryption systems. - - - Why should Swap be Encrypted? - - Like the encryption of disk partitions, encryption of swap space - is done to protect sensitive information. Imagine an application - that e.g. deals with passwords. As long as these passwords stay in - physical memory, all is well. However, if the operating system starts - swapping out memory pages to free space for other applications, the - passwords may be written to the disk platters unencrypted and easy to - retrieve for an adversary. Encrypting swap space can be a solution for - this scenario. - + Like the encryption of disk partitions, encryption of swap + space is used to protect sensitive information. Consider an + application that deals with passwords. As long as these + passwords stay in physical memory, they are not written to disk + and will be cleared after a reboot. However, if &os; starts + swapping out memory pages to free space, the passwords may be + written to the disk unencrypted. Encrypting swap space can be a + solution for this scenario. + + This section demonstrates how to configure an encrypted + swap partition using &man.gbde.8; or &man.geli.8; encryption. + It assumes a UFS file system where + /dev/ad0s1b is the swap partition. + + + Configuring Encrypted Swap + + Swap partitions are not encrypted by default and should be + cleared of any sensitive data before continuing. To overwrite + the current swap partition with random garbage, execute the + following command: - - Preparation + &prompt.root; dd if=/dev/random of=/dev/ad0s1b bs=1m - - For the remainder of this section, ad0s1b - will be the swap partition. - + To encrypt the swap partition using &man.gbde.8;, add the + .bde suffix to the swap line in + /etc/fstab: + + # Device Mountpoint FStype Options Dump Pass# +/dev/ad0s1b.bde none swap sw 0 0 - Up to this point the swap has been unencrypted. It is possible that - there are already passwords or other sensitive data on the disk platters - in cleartext. To rectify this, the data on the swap partition should be - overwritten with random garbage: + To instead encrypt the swap partition using &man.geli.8;, + use the + .eli suffix: - &prompt.root; dd if=/dev/random of=/dev/ad0s1b bs=1m + # Device Mountpoint FStype Options Dump Pass# +/dev/ad0s1b.eli none swap sw 0 0 + + By default, &man.geli.8; uses the AES + algorithm with a key length of 128 bit. These defaults can be + altered by using geli_swap_flags in + /etc/rc.conf. The following flags + configure encryption using the Blowfish algorithm with a key + length of 128 bits and a sectorsize of 4 kilobytes, and sets + detach on last close: + + geli_swap_flags="-e blowfish -l 128 -s 4096 -d" + + Refer to the description of onetime in + &man.geli.8; for a list of possible options. 
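+
+      These flags are options to the onetime
+      subcommand of &man.geli.8;.  As an illustrative sketch only,
+      assuming the swap partition used in this section, the
+      encrypted swap that is normally set up automatically at boot
+      corresponds roughly to running:
+
+      &prompt.root; geli onetime -e blowfish -l 128 -s 4096 -d ad0s1b
+&prompt.root; swapon /dev/ad0s1b.eli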
- Swap Encryption with &man.gbde.8; + Encrypted Swap Verification + + Once the system has rebooted, proper operation of the + encrypted swap can be verified using + swapinfo. - If &os; 6.0-RELEASE or newer is being used, the - .bde suffix should be added to the device in the - respective /etc/fstab swap line: + If &man.gbde.8; is being used: - -# Device Mountpoint FStype Options Dump Pass# -/dev/ad0s1b.bde none swap sw 0 0 - + &prompt.user; swapinfo +Device 1K-blocks Used Avail Capacity +/dev/ad0s1b.bde 542720 0 542720 0% - For systems prior to &os; 6.0-RELEASE, the following line - in /etc/rc.conf is also needed: + If &man.geli.8; is being used: - gbde_swap_enable="YES" + &prompt.user; swapinfo +Device 1K-blocks Used Avail Capacity +/dev/ad0s1b.eli 542720 0 542720 0% + - - Swap Encryption with &man.geli.8; + + + Highly Available Storage + (<acronym>HAST</acronym>) + + + + + Daniel + Gerzo + + Contributed by + + + + + + + Freddie + Cash + + With inputs from + + + + + Pawel Jakub + Dawidek + + + + + + Michael W. + Lucas + + + + + + Viktor + Petersson + + + + + + + HAST + high availability + + + High availability is one of the main requirements in + serious business applications and highly-available storage is a + key component in such environments. In &os;, the Highly + Available STorage (HAST) framework allows + transparent storage of the same data across several physically + separated machines connected by a TCP/IP + network. HAST can be understood as a + network-based RAID1 (mirror), and is similar to the DRBD® + storage system used in the GNU/&linux; platform. In combination + with other high-availability features of &os; like + CARP, HAST makes it + possible to build a highly-available storage cluster that is + resistant to hardware failures. + + The following are the main features of + HAST: + + + + Can be used to mask I/O errors on + local hard drives. + + + + File system agnostic as it works with any file system + supported by &os;. + + + + Efficient and quick resynchronization as only the blocks + that were modified during the downtime of a node are + synchronized. + + + + + + Can be used in an already deployed environment to add + additional redundancy. + + + + Together with CARP, + Heartbeat, or other tools, it can + be used to build a robust and durable storage system. + + - Alternatively, the procedure for using &man.geli.8; for swap - encryption is similar to that of using &man.gbde.8;. The - .eli suffix should be added to the device in the - respective /etc/fstab swap line: + After reading this section, you will know: - -# Device Mountpoint FStype Options Dump Pass# -/dev/ad0s1b.eli none swap sw 0 0 - + + + What HAST is, how it works, and + which features it provides. + + + + How to set up and use HAST on + &os;. + + + + How to integrate CARP and + &man.devd.8; to build a robust storage system. + + + + Before reading this section, you should: + + + + Understand &unix; and &os; basics (). + + + + Know how to configure network + interfaces and other core &os; subsystems (). + + + + Have a good understanding of &os; + networking (). + + - &man.geli.8; uses the AES algorithm with - a key length of 256 bit by default. + The HAST project was sponsored by The + &os; Foundation with support from http://www.omc.net/ + and http://www.transip.nl/. + + + HAST Operation + + HAST provides synchronous block-level + replication between two physical machines: the + primary, also known as the + master node, and the + secondary, or slave + node. 
These two machines together are referred to as a + cluster. + + Since HAST works in a primary-secondary + configuration, it allows only one of the cluster nodes to be + active at any given time. The primary node, also called + active, is the one which will handle all + the I/O requests to + HAST-managed devices. The secondary node + is automatically synchronized from the primary node. + + The physical components of the HAST + system are the local disk on primary node, and the disk on the + remote, secondary node. + + HAST operates synchronously on a block + level, making it transparent to file systems and applications. + HAST provides regular GEOM providers in + /dev/hast/ for use by other tools or + applications. There is no difference between using + HAST-provided devices and raw disks or + partitions. + + Each write, delete, or flush operation is sent to both the + local disk and to the remote disk over + TCP/IP. Each read operation is served from + the local disk, unless the local disk is not up-to-date or an + I/O error occurs. In such cases, the read + operation is sent to the secondary node. + + HAST tries to provide fast failure + recovery. For this reason, it is important to reduce + synchronization time after a node's outage. To provide fast + synchronization, HAST manages an on-disk + bitmap of dirty extents and only synchronizes those during a + regular synchronization, with an exception of the initial + sync. + + There are many ways to handle synchronization. + HAST implements several replication modes + to handle different synchronization methods: - Optionally, these defaults can be altered using the - geli_swap_flags option in - /etc/rc.conf. The following line tells the - encswap rc.d script to create &man.geli.8; swap - partitions using the Blowfish algorithm with a key length of 128 bit, - a sectorsize of 4 kilobytes and the detach on last close - option set: + + + memsync: This mode reports a + write operation as completed when the local write + operation is finished and when the remote node + acknowledges data arrival, but before actually storing the + data. The data on the remote node will be stored directly + after sending the acknowledgement. This mode is intended + to reduce latency, but still provides good + reliability. + - geli_swap_flags="-a blowfish -l 128 -s 4096 -d" + + fullsync: This mode reports a + write operation as completed when both the local write and + the remote write complete. This is the safest and the + slowest replication mode. This mode is the + default. + - Please refer to the description of the onetime command - in the &man.geli.8; manual page for a list of possible options. + + async: This mode reports a write + operation as completed when the local write completes. + This is the fastest and the most dangerous replication + mode. It should only be used when replicating to a + distant node where latency is too high for other + modes. + + - Verifying that it Works + HAST Configuration - Once the system has been rebooted, proper operation of the - encrypted swap can be verified using the - swapinfo command. + The HAST framework consists of several + components: - If &man.gbde.8; is being used: + + + The &man.hastd.8; daemon which provides data + synchronization. When this daemon is started, it will + automatically load geom_gate.ko. + - &prompt.user; swapinfo -Device 1K-blocks Used Avail Capacity -/dev/ad0s1b.bde 542720 0 542720 0% - + + The userland management utility, + &man.hastctl.8;. 
+ - If &man.geli.8; is being used: + + The &man.hast.conf.5; configuration file. This file + must exist before starting + hastd. + + - &prompt.user; swapinfo -Device 1K-blocks Used Avail Capacity -/dev/ad0s1b.eli 542720 0 542720 0% - + Users who prefer to statically build + GEOM_GATE support into the kernel should + add this line to the custom kernel configuration file, then + rebuild the kernel using the instructions in : + + options GEOM_GATE + + The following example describes how to configure two nodes + in master-slave/primary-secondary operation using + HAST to replicate the data between the two. + The nodes will be called hasta, with an + IP address of + 172.16.0.1, and hastb, + with an IP address of + 172.16.0.2. Both nodes will have a + dedicated hard drive /dev/ad6 of the same + size for HAST operation. The + HAST pool, sometimes referred to as a + resource or the GEOM provider in /dev/hast/, will be called + test. + + Configuration of HAST is done using + /etc/hast.conf. This file should be + identical on both nodes. The simplest configuration + is: + + resource test { + on hasta { + local /dev/ad6 + remote 172.16.0.2 + } + on hastb { + local /dev/ad6 + remote 172.16.0.1 + } +} + + For more advanced configuration, refer to + &man.hast.conf.5;. + + + It is also possible to use host names in the + remote statements if the hosts are + resolvable and defined either in + /etc/hosts or in the local + DNS. + + + Once the configuration exists on both nodes, the + HAST pool can be created. Run these + commands on both nodes to place the initial metadata onto the + local disk and to start &man.hastd.8;: + + &prompt.root; hastctl create test +&prompt.root; service hastd onestart + + + It is not possible to use + GEOM + providers with an existing file system or to convert an + existing storage to a HAST-managed pool. + This procedure needs to store some metadata on the provider + and there will not be enough required space available on an + existing provider. + + + A HAST node's primary or + secondary role is selected by an + administrator, or software like + Heartbeat, using &man.hastctl.8;. + On the primary node, hasta, issue this + command: + + &prompt.root; hastctl role primary test + + Run this command on the secondary node, + hastb: + + &prompt.root; hastctl role secondary test + + Verify the result by running hastctl on + each node: + + &prompt.root; hastctl status test + + Check the status line in the output. + If it says degraded, something is wrong + with the configuration file. It should say + complete on each node, meaning that the + synchronization between the nodes has started. The + synchronization completes when hastctl + status reports 0 bytes of dirty + extents. + + The next step is to create a file system on the + GEOM provider and mount it. This must be + done on the primary node. Creating the + file system can take a few minutes, depending on the size of + the hard drive. This example creates a UFS + file system on /dev/hast/test: + + &prompt.root; newfs -U /dev/hast/test +&prompt.root; mkdir /hast/test +&prompt.root; mount /dev/hast/test /hast/test + + Once the HAST framework is configured + properly, the final step is to make sure that + HAST is started automatically during + system boot. Add this line to + /etc/rc.conf: + + hastd_enable="YES" + + + Failover Configuration + + The goal of this example is to build a robust storage + system which is resistant to the failure of any given node. 
+ If the primary node fails, the secondary node is there to + take over seamlessly, check and mount the file system, and + continue to work without missing a single bit of + data. + + To accomplish this task, the Common Address Redundancy + Protocol (CARP) is used to provide for + automatic failover at the IP layer. + CARP allows multiple hosts on the same + network segment to share an IP address. + Set up CARP on both nodes of the cluster + according to the documentation available in . In this example, each node will have + its own management IP address and a + shared IP address of + 172.16.0.254. The primary + HAST node of the cluster must be the + master CARP node. + + The HAST pool created in the previous + section is now ready to be exported to the other hosts on + the network. This can be accomplished by exporting it + through NFS or + Samba, using the shared + IP address + 172.16.0.254. The only problem + which remains unresolved is an automatic failover should the + primary node fail. + + In the event of CARP interfaces going + up or down, the &os; operating system generates a + &man.devd.8; event, making it possible to watch for state + changes on the CARP interfaces. A state + change on the CARP interface is an + indication that one of the nodes failed or came back online. + These state change events make it possible to run a script + which will automatically handle the HAST failover. + + To catch state changes on the + CARP interfaces, add this configuration + to /etc/devd.conf on each node: + + notify 30 { + match "system" "IFNET"; + match "subsystem" "carp0"; + match "type" "LINK_UP"; + action "/usr/local/sbin/carp-hast-switch master"; +}; + +notify 30 { + match "system" "IFNET"; + match "subsystem" "carp0"; + match "type" "LINK_DOWN"; + action "/usr/local/sbin/carp-hast-switch slave"; +}; + + + If the systems are running &os; 10 or higher, + replace carp0 with the name of the + CARP-configured interface. + + + Restart &man.devd.8; on both nodes to put the new + configuration into effect: + + &prompt.root; service devd restart + + When the specified interface state changes by going up + or down , the system generates a notification, allowing the + &man.devd.8; subsystem to run the specified automatic + failover script, + /usr/local/sbin/carp-hast-switch. + For further clarification about this configuration, refer to + &man.devd.conf.5;. + + Here is an example of an automated failover + script: + + #!/bin/sh + +# Original script by Freddie Cash <fjwcash@gmail.com> +# Modified by Michael W. Lucas <mwlucas@BlackHelicopters.org> +# and Viktor Petersson <vpetersson@wireload.net> + +# The names of the HAST resources, as listed in /etc/hast.conf +resources="test" + +# delay in mounting HAST resource after becoming master +# make your best guess +delay=3 + +# logging +log="local0.debug" +name="carp-hast" + +# end of user configurable stuff + +case "$1" in + master) + logger -p $log -t $name "Switching to primary provider for ${resources}." + sleep ${delay} + + # Wait for any "hastd secondary" processes to stop + for disk in ${resources}; do + while $( pgrep -lf "hastd: ${disk} \(secondary\)" > /dev/null 2>&1 ); do + sleep 1 + done + + # Switch role for each disk + hastctl role primary ${disk} + if [ $? -ne 0 ]; then + logger -p $log -t $name "Unable to change role to primary for resource ${disk}." 
+			exit 1
+		fi
+	done
+
+	# Wait for the /dev/hast/* devices to appear
+	for disk in ${resources}; do
+		for I in $( jot 60 ); do
+			[ -c "/dev/hast/${disk}" ] && break
+			sleep 0.5
+		done
+
+		if [ ! -c "/dev/hast/${disk}" ]; then
+			logger -p $log -t $name "GEOM provider /dev/hast/${disk} did not appear."
+			exit 1
+		fi
+	done
+
+	logger -p $log -t $name "Role for HAST resources ${resources} switched to primary."
+
+	logger -p $log -t $name "Mounting disks."
+	for disk in ${resources}; do
+		mkdir -p /hast/${disk}
+		fsck -p -y -t ufs /dev/hast/${disk}
+		mount /dev/hast/${disk} /hast/${disk}
+	done
+
+	;;
+
+	slave)
+	logger -p $log -t $name "Switching to secondary provider for ${resources}."
+
+	# Switch roles for the HAST resources
+	for disk in ${resources}; do
+		if mount | grep -q "^/dev/hast/${disk} on "
+		then
+			umount -f /hast/${disk}
+		fi
+		sleep $delay
+		hastctl role secondary ${disk} 2>&1
+		if [ $? -ne 0 ]; then
+			logger -p $log -t $name "Unable to switch role to secondary for resource ${disk}."
+			exit 1
+		fi
+		logger -p $log -t $name "Role switched to secondary for resource ${disk}."
+	done
+	;;
+esac
+
+      In a nutshell, the script takes these actions when a
+      node becomes master:
+
+      Promotes the HAST pool to
+	  primary on this node.
+
+      Checks the file system under the
+	  HAST pool.
+
+      Mounts the pool.
+
+      When a node becomes secondary:
+
+      Unmounts the HAST pool.
+
+      Degrades the HAST pool to
+	  secondary.
+
+      This is just an example script which serves as a proof
+	of concept.  It does not handle all the possible scenarios
+	and can be extended or altered in any way, for example, to
+	start or stop required services.
+
+      For this example, a standard UFS
+	file system was used.  To reduce the time needed for
+	recovery, a journal-enabled UFS or
+	ZFS file system can be used
+	instead.
+
+      More detailed information with additional examples can
+      be found at http://wiki.FreeBSD.org/HAST.
+
+      Troubleshooting
+
+      HAST should generally work without
+      issues.  However, as with any other software product, there
+      may be times when it does not work as expected.  The sources
+      of the problems may be different, but the rule of thumb is to
+      ensure that the time is synchronized between the nodes of the
+      cluster.
+
+      When troubleshooting HAST, the
+      debugging level of &man.hastd.8; should be increased by
+      starting hastd with -d.
+      This argument may be specified multiple times to further
+      increase the debugging level.  Consider also using
+      -F, which starts hastd
+      in the foreground.
+
+      Recovering from the Split-brain Condition
+
+      Split-brain occurs when the nodes
+	of the cluster are unable to communicate with each other,
+	and both are configured as primary.  This is a dangerous
+	condition because it allows both nodes to make incompatible
+	changes to the data.  This problem must be corrected
+	manually by the system administrator.
+
+      The administrator must decide which node has more
+	important changes or merge them manually.  Then, let
+	HAST perform full synchronization of the
+	node which has the broken data.  To do this, issue these
+	commands on the node which needs to be
+	resynchronized:
+
+      &prompt.root; hastctl role init test
+&prompt.root; hastctl create test
+&prompt.root; hastctl role secondary test
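+
+      Afterwards, the progress of the resynchronization can be
+      checked with the status command shown earlier.  For
+      example:
+
+      &prompt.root; hastctl status test
+
+      The recovery is finished once the status is again reported
+      as complete and 0 bytes of
+      dirty extents remain.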
Index: zh_TW.UTF-8/books/handbook/dtrace/Makefile =================================================================== --- /dev/null +++ zh_TW.UTF-8/books/handbook/dtrace/Makefile @@ -0,0 +1,15 @@ +# +# Build the Handbook with just the content from this chapter. +# +# $FreeBSD: head/en_US.ISO8859-1/books/handbook/dtrace/Makefile 39631 2012-10-01 09:53:01Z gabor $ +# + +CHAPTERS= dtrace/chapter.xml + +VPATH= .. + +MASTERDOC= ${.CURDIR}/../${DOC}.${DOCBOOKSUFFIX} + +DOC_PREFIX?= ${.CURDIR}/../../../.. + +.include "../Makefile" Index: zh_TW.UTF-8/books/handbook/eresources/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/eresources/chapter.xml +++ zh_TW.UTF-8/books/handbook/eresources/chapter.xml @@ -1,12 +1,16 @@ - + + 網際網路上的資源 進展飛快的 FreeBSD 使得現有的印刷、平面媒體跟不上它的最新進度! @@ -112,11 +116,6 @@ - &a.policy.name; - FreeBSD Core team 的 policy 方針討論區。這裡文章不多,且只限 core team 才可發言。 - - - &a.questions.name; 使用問題及技術支援 @@ -194,16 +193,6 @@ - &a.audit.name; - Source code 的稽核(audit)計劃 - - - - &a.binup.name; - 研發 binary 的升級方式 - - - &a.bluetooth.name; 在 FreeBSD 中使用藍芽(&bluetooth;)技術 @@ -214,11 +203,6 @@ - &a.cvsweb.name; - CVSweb 的維護 - - - &a.database.name; 討論各式資料庫在 FreeBSD 的研發、運用 @@ -314,11 +298,6 @@ - &a.libh.name; - 新世代的安裝、打包套件機制 - - - &a.mips.name; 移植 FreeBSD 到 &mips; @@ -349,12 +328,6 @@ - &a.openoffice.name; - 移植 OpenOffice.org 及 - &staroffice; 到 FreeBSD - - - &a.performance.name; 在高效能/負荷環境下的效能調校(tuning)議題 @@ -727,44 +700,6 @@ - &a.audit.name; - - - Source code audit project - - This is the mailing list for the FreeBSD source code - audit project. Although this was originally intended for - security-related changes, its charter has been expanded to - review any code changes. - - This list is very heavy on patches, and is probably of no - interest to the average FreeBSD user. Security discussions - not related to a particular code change are held on - freebsd-security. Conversely, all developers are encouraged - to send their patches here for review, especially if they - touch a part of the system where a bug may adversely affect - the integrity of the system. - - - - - - - &a.binup.name; - - - FreeBSD Binary Update Project - - This list exists to provide discussion for the binary - update system, or binup. - Design issues, implementation details, - patches, bug reports, status reports, feature requests, commit - logs, and all other things related to - binup are fair game. - - - - &a.bluetooth.name; @@ -854,17 +789,6 @@ - &a.cvsweb.name; - - - FreeBSD CVSweb Project - - Technical discussions about use, development and maintenance - of FreeBSD-CVSweb. - - - - &a.doc.name; @@ -1129,18 +1053,6 @@ - &a.openoffice.name; - - - OpenOffice.org - - Discussions concerning the porting and maintenance - of OpenOffice.org and - &staroffice;. - - - - &a.performance.name; @@ -1195,17 +1107,6 @@ - &a.policy.name; - - - Core team policy decisions - - This is a low volume, read-only mailing list for FreeBSD - Core Team Policy decisions. - - - - &a.ports.name; Index: zh_TW.UTF-8/books/handbook/filesystems/Makefile =================================================================== --- /dev/null +++ zh_TW.UTF-8/books/handbook/filesystems/Makefile @@ -0,0 +1,15 @@ +# +# Build the Handbook with just the content from this chapter. +# +# $FreeBSD: head/en_US.ISO8859-1/books/handbook/filesystems/Makefile 39631 2012-10-01 09:53:01Z gabor $ +# + +CHAPTERS= filesystems/chapter.xml + +VPATH= .. 
+ +MASTERDOC= ${.CURDIR}/../${DOC}.${DOCBOOKSUFFIX} + +DOC_PREFIX?= ${.CURDIR}/../../../.. + +.include "../Makefile" Index: zh_TW.UTF-8/books/handbook/geom/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/geom/chapter.xml +++ zh_TW.UTF-8/books/handbook/geom/chapter.xml @@ -1,20 +1,31 @@ - - GEOM: Modular Disk Transformation Framework + + + + GEOM: Modular Disk Transformation Framework + - TomRhodesWritten by + + + Tom + Rhodes + + Written by + - - - + 概述 @@ -69,25 +80,27 @@ - - GEOM 導論 - - GEOM 透過 privoder(即 /dev/ - 下的特殊裝置檔案) 來操控 classes(如 Master Boot Records、 - BSD labels 等) 。GEOM 支援多種軟體 - RAID 配置,透過 GEOM 存取時, - 作業系統和應用程式不會意識到 GEOM 存在。 - - - - RAID0 - 分散連結(striping) - - TomRhodesWritten by - MurrayStokely - - - - + + + RAID0 - 分散連結(striping) + + + + + Tom + Rhodes + + Written by + + + + + Murray + Stokely + + + + GEOM @@ -189,7 +202,7 @@ - + RAID1 - 鏡射(Mirroring) @@ -353,4 +366,855 @@ + + + + + <acronym>RAID</acronym>3 - Byte-level Striping with + Dedicated Parity + + + + + Mark + Gladman + + Written by + + + + + Daniel + Gerzo + + + + + + + + Tom + Rhodes + + Based on documentation by + + + + + Murray + Stokely + + + + + + + GEOM + + + RAID3 + + + RAID3 is a method used to combine several + disk drives into a single volume with a dedicated parity disk. + In a RAID3 system, data is split up into a + number of bytes that are written across all the drives in the + array except for one disk which acts as a dedicated parity disk. + This means that disk reads from a RAID3 + implementation access all disks in the array. Performance can + be enhanced by using multiple disk controllers. The + RAID3 array provides a fault tolerance of 1 + drive, while providing a capacity of 1 - 1/n times the total + capacity of all drives in the array, where n is the number of + hard drives in the array. Such a configuration is mostly + suitable for storing data of larger sizes such as multimedia + files. + + At least 3 physical hard drives are required to build a + RAID3 array. Each disk must be of the same + size, since I/O requests are interleaved to + read or write to multiple disks in parallel. Also, due to the + nature of RAID3, the number of drives must be + equal to 3, 5, 9, 17, and so on, or 2^n + 1. + + This section demonstrates how to create a software + RAID3 on a &os; system. + + + While it is theoretically possible to boot from a + RAID3 array on &os;, that configuration is + uncommon and is not advised. + + + + Creating a Dedicated <acronym>RAID</acronym>3 + Array + + In &os;, support for RAID3 is + implemented by the &man.graid3.8; GEOM + class. Creating a dedicated RAID3 array on + &os; requires the following steps. + + + + First, load the geom_raid3.ko + kernel module by issuing one of the following + commands: + + &prompt.root; graid3 load + + or: + + &prompt.root; kldload geom_raid3 + + + + Ensure that a suitable mount point exists. This + command creates a new directory to use as the mount + point: + + &prompt.root; mkdir /multimedia + + + + Determine the device names for the disks which will be + added to the array, and create the new + RAID3 device. The final device listed + will act as the dedicated parity disk. This example uses + three unpartitioned ATA drives: + ada1 and + ada2 for + data, and + ada3 for + parity. + + &prompt.root; graid3 label -v gr0 /dev/ada1 /dev/ada2 /dev/ada3 +Metadata value stored on /dev/ada1. +Metadata value stored on /dev/ada2. +Metadata value stored on /dev/ada3. +Done. 
+ + + + Partition the newly created gr0 + device and put a UFS file system on + it: + + &prompt.root; gpart create -s GPT /dev/raid3/gr0 +&prompt.root; gpart add -t freebsd-ufs /dev/raid3/gr0 +&prompt.root; newfs -j /dev/raid3/gr0p1 + + Many numbers will glide across the screen, and after a + bit of time, the process will be complete. The volume has + been created and is ready to be mounted: + + &prompt.root; mount /dev/raid3/gr0p1 /multimedia/ + + The RAID3 array is now ready to + use. + + + + Additional configuration is needed to retain this setup + across system reboots. + + + + The geom_raid3.ko module must be + loaded before the array can be mounted. To automatically + load the kernel module during system initialization, add + the following line to + /boot/loader.conf: + + geom_raid3_load="YES" + + + + The following volume information must be added to + /etc/fstab in order to + automatically mount the array's file system during the + system boot process: + + /dev/raid3/gr0p1 /multimedia ufs rw 2 2 + + + + + + + + Software <acronym>RAID</acronym> Devices + + + + + Warren + Block + + Originally contributed by + + + + + + GEOM + + + Software RAID Devices + Hardware-assisted RAID + + + Some motherboards and expansion cards add some simple + hardware, usually just a ROM, that allows the + computer to boot from a RAID array. After + booting, access to the RAID array is handled + by software running on the computer's main processor. This + hardware-assisted software + RAID gives RAID + arrays that are not dependent on any particular operating + system, and which are functional even before an operating system + is loaded. + + Several levels of RAID are supported, + depending on the hardware in use. See &man.graid.8; for a + complete list. + + &man.graid.8; requires the geom_raid.ko + kernel module, which is included in the + GENERIC kernel starting with &os; 9.1. + If needed, it can be loaded manually with + graid load. + + + Creating an Array + + Software RAID devices often have a menu + that can be entered by pressing special keys when the computer + is booting. The menu can be used to create and delete + RAID arrays. &man.graid.8; can also create + arrays directly from the command line. + + graid label is used to create a new + array. The motherboard used for this example has an Intel + software RAID chipset, so the Intel + metadata format is specified. The new array is given a label + of gm0, it is a mirror + (RAID1), and uses drives + ada0 and + ada1. + + + Some space on the drives will be overwritten when they + are made into a new array. Back up existing data + first! + + + &prompt.root; graid label Intel gm0 RAID1 ada0 ada1 +GEOM_RAID: Intel-a29ea104: Array Intel-a29ea104 created. +GEOM_RAID: Intel-a29ea104: Disk ada0 state changed from NONE to ACTIVE. +GEOM_RAID: Intel-a29ea104: Subdisk gm0:0-ada0 state changed from NONE to ACTIVE. +GEOM_RAID: Intel-a29ea104: Disk ada1 state changed from NONE to ACTIVE. +GEOM_RAID: Intel-a29ea104: Subdisk gm0:1-ada1 state changed from NONE to ACTIVE. +GEOM_RAID: Intel-a29ea104: Array started. +GEOM_RAID: Intel-a29ea104: Volume gm0 state changed from STARTING to OPTIMAL. +Intel-a29ea104 created +GEOM_RAID: Intel-a29ea104: Provider raid/r0 for volume gm0 created. + + A status check shows the new mirror is ready for + use: + + &prompt.root; graid status + Name Status Components +raid/r0 OPTIMAL ada0 (ACTIVE (ACTIVE)) + ada1 (ACTIVE (ACTIVE)) + + The array device appears in + /dev/raid/. The first array is called + r0. 
Additional arrays, if present, will + be r1, r2, and so + on. + + The BIOS menu on some of these devices + can create arrays with special characters in their names. To + avoid problems with those special characters, arrays are given + simple numbered names like r0. To show + the actual labels, like gm0 in the + example above, use &man.sysctl.8;: + + &prompt.root; sysctl kern.geom.raid.name_format=1 + + + + Multiple Volumes + + Some software RAID devices support + more than one volume on an array. + Volumes work like partitions, allowing space on the physical + drives to be split and used in different ways. For example, + Intel software RAID devices support two + volumes. This example creates a 40 G mirror for safely + storing the operating system, followed by a 20 G + RAID0 (stripe) volume for fast temporary + storage: + + &prompt.root; graid label -S 40G Intel gm0 RAID1 ada0 ada1 +&prompt.root; graid add -S 20G gm0 RAID0 + + Volumes appear as additional + rX entries + in /dev/raid/. An array with two volumes + will show r0 and + r1. + + See &man.graid.8; for the number of volumes supported by + different software RAID devices. + + + + Converting a Single Drive to a Mirror + + Under certain specific conditions, it is possible to + convert an existing single drive to a &man.graid.8; array + without reformatting. To avoid data loss during the + conversion, the existing drive must meet these minimum + requirements: + + + + The drive must be partitioned with the + MBR partitioning scheme. + GPT or other partitioning schemes with + metadata at the end of the drive will be overwritten and + corrupted by the &man.graid.8; metadata. + + + + There must be enough unpartitioned and unused space at + the end of the drive to hold the &man.graid.8; metadata. + This metadata varies in size, but the largest occupies + 64 M, so at least that much free space is + recommended. + + + + If the drive meets these requirements, start by making a + full backup. Then create a single-drive mirror with that + drive: + + &prompt.root; graid label Intel gm0 RAID1 ada0 NONE + + &man.graid.8; metadata was written to the end of the drive + in the unused space. A second drive can now be inserted into + the mirror: + + &prompt.root; graid insert raid/r0 ada1 + + Data from the original drive will immediately begin to be + copied to the second drive. The mirror will operate in + degraded status until the copy is complete. + + + + Inserting New Drives into the Array + + Drives can be inserted into an array as replacements for + drives that have failed or are missing. If there are no + failed or missing drives, the new drive becomes a spare. For + example, inserting a new drive into a working two-drive mirror + results in a two-drive mirror with one spare drive, not a + three-drive mirror. + + In the example mirror array, data immediately begins to be + copied to the newly-inserted drive. Any existing information + on the new drive will be overwritten. + + &prompt.root; graid insert raid/r0 ada1 +GEOM_RAID: Intel-a29ea104: Disk ada1 state changed from NONE to ACTIVE. +GEOM_RAID: Intel-a29ea104: Subdisk gm0:1-ada1 state changed from NONE to NEW. +GEOM_RAID: Intel-a29ea104: Subdisk gm0:1-ada1 state changed from NEW to REBUILD. +GEOM_RAID: Intel-a29ea104: Subdisk gm0:1-ada1 rebuild start at 0. 
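+
+      If a disk is misbehaving but has not yet been marked as
+      failed by the array, &man.graid.8; also provides a
+      fail subcommand to mark it as failed by
+      hand before removing it.  As a sketch, assuming the example
+      mirror and disk names used above:
+
+      &prompt.root; graid fail raid/r0 ada1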
+      Removing Drives from the Array
+
+      Individual drives can be permanently removed from
+	an array and their metadata erased:
+
+      &prompt.root; graid remove raid/r0 ada1
+GEOM_RAID: Intel-a29ea104: Disk ada1 state changed from ACTIVE to OFFLINE.
+GEOM_RAID: Intel-a29ea104: Subdisk gm0:1-[unknown] state changed from ACTIVE to NONE.
+GEOM_RAID: Intel-a29ea104: Volume gm0 state changed from OPTIMAL to DEGRADED.
+
+      Stopping the Array
+
+      An array can be stopped without removing metadata from the
+	drives.  The array will be restarted when the system is
+	booted.
+
+      &prompt.root; graid stop raid/r0
+
+      Checking Array Status
+
+      Array status can be checked at any time.  After a drive
+	was added to the mirror in the example above, data is being
+	copied from the original drive to the new drive:
+
+      &prompt.root; graid status
+   Name    Status  Components
+raid/r0  DEGRADED  ada0 (ACTIVE (ACTIVE))
+                   ada1 (ACTIVE (REBUILD 28%))
+
+      Some types of arrays, like RAID0 or
+	CONCAT, may not be shown in the status
+	report if disks have failed.  To see these partially-failed
+	arrays, add -ga:
+
+      &prompt.root; graid status -ga
+          Name  Status  Components
+Intel-e2d07d9a  BROKEN  ada6 (ACTIVE (ACTIVE))
+
+      Deleting Arrays
+
+      Arrays are destroyed by deleting all of the volumes from
+	them.  When the last volume present is deleted, the array is
+	stopped and metadata is removed from the drives:
+
+      &prompt.root; graid delete raid/r0
+
+      Deleting Unexpected Arrays
+
+      Drives may unexpectedly contain &man.graid.8; metadata,
+	either from previous use or manufacturer testing.
+	&man.graid.8; will detect these drives and create an array,
+	interfering with access to the individual drive.  To remove
+	the unwanted metadata:
+
+	  Boot the system.  At the boot menu, select
+	    2 for the loader prompt.  Enter:
+
+	  OK set kern.geom.raid.enable=0
+OK boot
+
+	  The system will boot with &man.graid.8;
+	    disabled.
+
+	  Back up all data on the affected drive.
+
+	  As a workaround, &man.graid.8; array detection
+	    can be disabled by adding
+
+	  kern.geom.raid.enable=0
+
+	  to /boot/loader.conf.
+
+	  To permanently remove the &man.graid.8; metadata
+	    from the affected drive, boot a &os; installation
+	    CD-ROM or memory stick, and select
+	    Shell.  Use graid status
+	    to find the name of the array, typically
+	    raid/r0:
+
+	  &prompt.root; graid status
+   Name   Status  Components
+raid/r0  OPTIMAL  ada0 (ACTIVE (ACTIVE))
+                  ada1 (ACTIVE (ACTIVE))
+
+	  Delete the volume by name:
+
+	  &prompt.root; graid delete raid/r0
+
+	  If there is more than one volume shown, repeat the
+	    process for each volume.  After the last array has been
+	    deleted, the volume will be destroyed.
+
+	  Reboot and verify data, restoring from backup if
+	    necessary.  After the metadata has been removed, the
+	    kern.geom.raid.enable=0 entry in
+	    /boot/loader.conf can also be
+	    removed.
+
+    <acronym>GEOM</acronym> Gate Network
+
+    GEOM provides a simple mechanism for
+      providing remote access to devices such as disks,
+      CDs, and file systems through the use of the
+      GEOM Gate network daemon,
+      ggated.  The system with the device
+      runs the server daemon which handles requests made by clients
+      using ggatec.  The devices should not
+      contain any sensitive data as the connection between the client
+      and the server is not encrypted.
+
+    Similar to NFS, which is discussed in
+      , ggated
+      is configured using an exports file.  This file specifies which
+      systems are permitted to access the exported resources and what
+      level of access they are offered.
For example, to give the + client 192.168.1.5 + read and write access to the fourth slice on the first + SCSI disk, create + /etc/gg.exports with this line: + + 192.168.1.5 RW /dev/da0s4d + + Before exporting the device, ensure it is not currently + mounted. Then, start ggated: + + &prompt.root; ggated + + Several options are available for specifying an alternate + listening port or changing the default location of the exports + file. Refer to &man.ggated.8; for details. + + To access the exported device on the client machine, first + use ggatec to specify the + IP address of the server and the device name + of the exported device. If successful, this command will + display a ggate device name to mount. Mount + that specified device name on a free mount point. This example + connects to the /dev/da0s4d partition on + 192.168.1.1, then mounts + /dev/ggate0 on + /mnt: + + &prompt.root; ggatec create -o rw 192.168.1.1 /dev/da0s4d +ggate0 +&prompt.root; mount /dev/ggate0 /mnt + + The device on the server may now be accessed through + /mnt on the client. For more details about + ggatec and a few usage examples, refer to + &man.ggatec.8;. + + + The mount will fail if the device is currently mounted on + either the server or any other client on the network. If + simultaneous access is needed to network resources, use + NFS instead. + + + When the device is no longer needed, unmount it with + umount so that the resource is available to + other clients. + + + + Labeling Disk Devices + + + GEOM + + + Disk Labels + + + During system initialization, the &os; kernel creates + device nodes as devices are found. This method of probing for + devices raises some issues. For instance, what if a new disk + device is added via USB? It is likely that + a flash device may be handed the device name of + da0 and the original + da0 shifted to + da1. This will cause issues mounting + file systems if they are listed in + /etc/fstab which may also prevent the + system from booting. + + One solution is to chain SCSI devices + in order so a new device added to the SCSI + card will be issued unused device numbers. But what about + USB devices which may replace the primary + SCSI disk? This happens because + USB devices are usually probed before the + SCSI card. One solution is to only insert + these devices after the system has been booted. Another method + is to use only a single ATA drive and never + list the SCSI devices in + /etc/fstab. + + A better solution is to use glabel to + label the disk devices and use the labels in + /etc/fstab. Because + glabel stores the label in the last sector of + a given provider, the label will remain persistent across + reboots. By using this label as a device, the file system may + always be mounted regardless of what device node it is accessed + through. + + + glabel can create both transient and + permanent labels. Only permanent labels are consistent across + reboots. Refer to &man.glabel.8; for more information on the + differences between labels. + + + + Label Types and Examples + + Permanent labels can be a generic or a file system label. + Permanent file system labels can be created with + &man.tunefs.8; or &man.newfs.8;. These types of labels are + created in a sub-directory of /dev, and + will be named according to the file system type. For example, + UFS2 file system labels will be created in + /dev/ufs. Generic permanent labels can + be created with glabel label. These are + not file system specific and will be created in + /dev/label. 
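+
+      As a short sketch, assuming a hypothetical spare disk
+      da5, a generic permanent label named
+      backup could be created with:
+
+      &prompt.root; glabel label -v backup /dev/da5
+
+      The label then appears as
+      /dev/label/backup and can be used in
+      place of the raw device name, for example in
+      /etc/fstab.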
+ + Temporary labels are destroyed at the next reboot. These + labels are created in /dev/label and are + suited to experimentation. A temporary label can be created + using glabel create. + + + + To create a permanent label for a + UFS2 file system without destroying any + data, issue the following command: + + &prompt.root; tunefs -L home /dev/da3 + + + If the file system is full, this may cause data + corruption. + + + A label should now exist in /dev/ufs + which may be added to /etc/fstab: + + /dev/ufs/home /home ufs rw 2 2 + + + The file system must not be mounted while attempting + to run tunefs. + + + Now the file system may be mounted: + + &prompt.root; mount /home + + From this point on, so long as the + geom_label.ko kernel module is loaded at + boot with /boot/loader.conf or the + GEOM_LABEL kernel option is present, + the device node may change without any ill effect on the + system. + + File systems may also be created with a default label + by using the flag with + newfs. Refer to &man.newfs.8; for + more information. + + The following command can be used to destroy the + label: + + &prompt.root; glabel destroy home + + The following example shows how to label the partitions of + a boot disk. + + + Labeling Partitions on the Boot Disk + + By permanently labeling the partitions on the boot disk, + the system should be able to continue to boot normally, even + if the disk is moved to another controller or transferred to + a different system. For this example, it is assumed that a + single ATA disk is used, which is + currently recognized by the system as + ad0. It is also assumed that the + standard &os; partition scheme is used, with + /, + /var, + /usr and + /tmp, as + well as a swap partition. + + Reboot the system, and at the &man.loader.8; prompt, + press 4 to boot into single user mode. + Then enter the following commands: + + &prompt.root; glabel label rootfs /dev/ad0s1a +GEOM_LABEL: Label for provider /dev/ad0s1a is label/rootfs +&prompt.root; glabel label var /dev/ad0s1d +GEOM_LABEL: Label for provider /dev/ad0s1d is label/var +&prompt.root; glabel label usr /dev/ad0s1f +GEOM_LABEL: Label for provider /dev/ad0s1f is label/usr +&prompt.root; glabel label tmp /dev/ad0s1e +GEOM_LABEL: Label for provider /dev/ad0s1e is label/tmp +&prompt.root; glabel label swap /dev/ad0s1b +GEOM_LABEL: Label for provider /dev/ad0s1b is label/swap +&prompt.root; exit + + The system will continue with multi-user boot. After + the boot completes, edit /etc/fstab and + replace the conventional device names, with their respective + labels. The final /etc/fstab will + look like this: + + # Device Mountpoint FStype Options Dump Pass# +/dev/label/swap none swap sw 0 0 +/dev/label/rootfs / ufs rw 1 1 +/dev/label/tmp /tmp ufs rw 2 2 +/dev/label/usr /usr ufs rw 2 2 +/dev/label/var /var ufs rw 2 2 + + The system can now be rebooted. If everything went + well, it will come up normally and mount + will show: + + &prompt.root; mount +/dev/label/rootfs on / (ufs, local) +devfs on /dev (devfs, local) +/dev/label/tmp on /tmp (ufs, local, soft-updates) +/dev/label/usr on /usr (ufs, local, soft-updates) +/dev/label/var on /var (ufs, local, soft-updates) + + + Starting with &os; 7.2, the &man.glabel.8; class + supports a new label type for UFS file + systems, based on the unique file system id, + ufsid. These labels may be found in + /dev/ufsid and are + created automatically during system startup. It is possible + to use ufsid labels to mount partitions + using /etc/fstab. 
Use glabel + status to receive a list of file systems and their + corresponding ufsid labels: + + &prompt.user; glabel status + Name Status Components +ufsid/486b6fc38d330916 N/A ad4s1d +ufsid/486b6fc16926168e N/A ad4s1f + + In the above example, ad4s1d + represents /var, + while ad4s1f represents + /usr. + Using the ufsid values shown, these + partitions may now be mounted with the following entries in + /etc/fstab: + + /dev/ufsid/486b6fc38d330916 /var ufs rw 2 2 +/dev/ufsid/486b6fc16926168e /usr ufs rw 2 2 + + Any partitions with ufsid labels can be + mounted in this way, eliminating the need to manually create + permanent labels, while still enjoying the benefits of device + name independent mounting. + + + + + UFS Journaling Through <acronym>GEOM</acronym> + + + GEOM + + + Journaling + + + Beginning with &os; 7.0, support for journals on + UFS file systems is available. The + implementation is provided through the GEOM + subsystem and is configured using gjournal. + Unlike other file system journaling implementations, the + gjournal method is block based and not + implemented as part of the file system. It is a + GEOM extension. + + Journaling stores a log of file system transactions, such as + changes that make up a complete disk write operation, before + meta-data and file writes are committed to the disk. This + transaction log can later be replayed to redo file system + transactions, preventing file system inconsistencies. + + This method provides another mechanism to protect against + data loss and inconsistencies of the file system. Unlike Soft + Updates, which tracks and enforces meta-data updates, and + snapshots, which create an image of the file system, a log is + stored in disk space specifically for this task. For better + performance, the journal may be stored on another disk. In this + configuration, the journal provider or storage device should be + listed after the device to enable journaling on. + + The GENERIC kernel provides support for + gjournal. To automatically load the + geom_journal.ko kernel module at boot time, + add the following line to + /boot/loader.conf: + + geom_journal_load="YES" + + If a custom kernel is used, ensure the following line is in + the kernel configuration file: + + options GEOM_JOURNAL + + Once the module is loaded, a journal can be created on a new + file system using the following steps. In this example, + da4 is a new SCSI + disk: + + &prompt.root; gjournal load +&prompt.root; gjournal label /dev/da4 + + This will load the module and create a + /dev/da4.journal device node on + /dev/da4. + + A UFS file system may now be created on + the journaled device, then mounted on an existing mount + point: + + &prompt.root; newfs -O 2 -J /dev/da4.journal +&prompt.root; mount /dev/da4.journal /mnt + + + In the case of several slices, a journal will be created + for each individual slice. For instance, if + ad4s1 and ad4s2 are + both slices, then gjournal will create + ad4s1.journal and + ad4s2.journal. + + + Journaling may also be enabled on current file systems by + using tunefs. However, + always make a backup before attempting to + alter an existing file system. In most cases, + gjournal will fail if it is unable to create + the journal, but this does not protect against data loss + incurred as a result of misusing tunefs. + Refer to &man.gjournal.8; and &man.tunefs.8; for more + information about these commands. + + It is possible to journal the boot disk of a &os; system. 
+ Refer to the article + Implementing UFS Journaling on a Desktop PC for detailed + instructions. + Index: zh_TW.UTF-8/books/handbook/install/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/install/chapter.xml +++ zh_TW.UTF-8/books/handbook/install/chapter.xml @@ -1,26 +1,29 @@ - - 安裝 FreeBSD + + + 安裝 FreeBSD + - JimMockRestructured, reorganized, and parts - rewritten by + JimMockRestructured, + reorganized, and parts rewritten by - RandyPrattThe sysinstall walkthrough, screenshots, and general - copy by + RandyPrattThe + sysinstall walkthrough, screenshots, and general copy by + - - - 概述 @@ -84,7 +87,7 @@ Information 找相關的 Installation Notes 說明。 接下來的章節會有相關說明整理。 根據安裝 &os; 的方式不同,可能會需要軟碟機或光碟機, - 或某些情況則是要網路卡。 這些部份會在 有介紹。 + 或某些情況則是要網路卡。 這些部份會在 有介紹。 &os;/&arch.i386; 及 &os;/&arch.pc98; 架構 @@ -412,52 +415,6 @@ - - - - - Alpha 架構的磁碟配置模式 - - 在 Alpha 上,您必須使用一整顆硬碟給 FreeBSD, - 沒有辦法在同顆硬碟上跟其他作業系統共存。 依不同型號的 Alpha - 機器,您的硬碟可以是 SCSI 或 IDE 硬碟, - 只要您的機器可以從這些硬碟開機就可以。 - - 按照 Digital / Compaq 使用手冊的編排風格, - 所有 SRM 輸入的部分都用大寫表示。 注意:SRM 大小寫有別。 - - 要得知您磁碟的名稱以及型號,可以在 SRM console 提示下使用 - SHOW DEVICE 命令: - - >>>SHOW DEVICE -dka0.0.0.4.0 DKA0 TOSHIBA CD-ROM XM-57 3476 -dkc0.0.0.1009.0 DKC0 RZ1BB-BS 0658 -dkc100.1.0.1009.0 DKC100 SEAGATE ST34501W 0015 -dva0.0.0.0.1 DVA0 -ewa0.0.0.3.0 EWA0 00-00-F8-75-6D-01 -pkc0.7.0.1009.0 PKC0 SCSI Bus ID 7 5.27 -pqa0.0.0.4.0 PQA0 PCI EIDE -pqb0.0.1.4.0 PQB0 PCI EIDE - - 例子中機器為 Digital Personal Workstation 433au, - 並且顯示出此機器有連接三個磁碟機。 第一個是 CDROM,叫做 - DKA0 ;另外兩個是磁碟機, 分別叫做: - DKC0DKC100。 - - - 磁碟機的名稱中有 DKx - 字樣的是 SCSI 硬碟。例如: DKA100 - 表示是 SCSI 硬碟,其 SCSI ID 為 1, 位在第一個 SCSI 匯流排(A); - 而 DKC300 表示是 SCSI 硬碟, - 其 SCSI ID 為 3,位於第三個 SCSI 匯流排(C)。 - 裝置名稱 PKx 則為 SCSI 控制卡。 - 由上述 SHOW DEVICE 的結果看來, - SCSI 光碟機也被視為是 SCSI 硬碟的一種。 - - 若為 IDE 硬碟的話,名稱會有 DQx 字樣, - 而 PQx 則表示相對應的 IDE 磁碟控制器。 - - @@ -583,58 +540,176 @@ - 若已經有 FreeBSD 的 CD 或 DVD,但機器不支援從光碟開機的話, - 那麼請直接進下一節 ()。 + 若已經有 FreeBSD 的 CD 或 DVD, + 那麼請直接進下一節 ()。 若沒有 FreeBSD 安裝片的話,那麼請先看 這裡會介紹如何準備所需要的安裝片, - 照該節步驟弄好後,就可以繼續下一步 。 + 照該節步驟弄好後,就可以繼續下一步 - - 準備好開機磁片 + + Prepare the Boot Media - FreeBSD 安裝流程是要從電腦開機後,進入 FreeBSD 安裝畫面 —— - 而不是在其他作業系統上執行程式。 - 一般來講,電腦都是用裝在硬碟上的作業系統來開機, - 也可以用開機磁片來開機; - 此外,現在大多數電腦都可以從光碟開機。 + The &os; installation process is started by booting the + computer into the &os; installer. It is not a program that + can be run within another operating system. The computer + normally boots using the operating system installed on the + hard disk, but it can also be configured to boot from a CDROM + or from a USB disk. - 如果您有 FreeBSD 的 CDROM 或 DVD(無論是用買現成的或是自己燒錄的), - 且您的電腦可支援由光碟開機,(通常在 BIOS 中會有 - Boot Order 或類似選項),那麼您就可以跳過此小節。 - 因為 FreeBSD CDROM 或 DVD 都可以用來開機。 + If installing from a CD/DVD to a computer whose BIOS + supports booting from the CD/DVD, skip this section. The + &os; CD/DVD images are bootable and can be used to install + &os; without any other special preparation. 
- 請按照下面步驟,以製作開機片: + To create a bootable memory stick, follow these + steps: - 取得開機片的映像檔(images) + Acquire the Memory Stick Image - 開機磁片用的映像檔(images)通常會放在光碟片上的 - floppies/ 目錄內, - 另外也可以從像是下面 FTP 站的 floppies 目錄下載: - ftp://ftp.FreeBSD.org/pub/FreeBSD/releases/<arch>/<version>-RELEASE/floppies/ - 。請將『arch』、『version』替換為打算安裝的電腦架構、OS 版本。 - 例如:想裝的是 &os;/&arch.i386; &rel.current;-RELEASE - ,那麼可以到 ftp://ftp.FreeBSD.org/pub/FreeBSD/releases/i386/&rel.current;-RELEASE/floppies/ 下載。 - - 映像檔(images)的附檔名都是 .flp。而 - floppies/ 目錄內包含一些不同用途的映像檔 - (images),這取決於您要裝的 FreeBSD 版本、需求、硬體配備為何。 - 通常要 4 個映像檔,也就是: boot.flp、 - kern1.flpkern2.flp、 - kern3.flp。 若有疑問的話,請翻閱同一目錄下的 - README.TXT 文件檔,以瞭解相關最新注意事項。 - + Memory stick images for + &os; 8.X can be downloaded + from the ISO-IMAGES/ directory at + ftp://ftp.FreeBSD.org/pub/FreeBSD/releases/arch/ISO-IMAGES/version/&os;-version-RELEASE-arch-memstick.img. + Replace arch and + version with the architecture + and the version number to install. For example, the + memory stick images for + &os;/&arch.i386; &rel2.current;-RELEASE are + available from ftp://ftp.FreeBSD.org/pub/FreeBSD/releases/&arch.i386;/ISO-IMAGES/&rel2.current;/&os;-&rel2.current;-RELEASE-&arch.i386;-memstick.img. + + + A different directory path is used for + &os; 9.0-RELEASE and later versions. How to + download and install + &os; 9.X + is covered in . + + + The memory stick image has a .img + extension. The ISO-IMAGES/ directory + contains a number of different images and the one to + use depends on the version of &os; and the type of media + supported by the hardware being installed to. - 在使用 FTP 下載時,必須使用 binary 模式 - 進行傳輸。 有些瀏覽器預設是以 text (或 - ASCII) 模式來傳輸資料, - 所以這些錯誤傳輸模式下載的映像檔所做成的磁片,會無法使用。 + Before proceeding, back up the + data on the USB stick, as this procedure will + erase it. + + + + + Write the Image File to the Memory Stick + + + Using &os; to Write the Image + + + The example below lists + /dev/da0 as the target device + where the image will be written. Be very careful that + you have the correct device as the output target, or + you may destroy your existing data. + + + + Writing the Image with &man.dd.1; + + The .img file is + not a regular file that can just + be copied to the memory stick. It is an image of the + complete contents of the disk. This means that + &man.dd.1; must be used to write the image directly to + the disk: + + &prompt.root; dd if=&os;-&rel2.current;-RELEASE-&arch.i386;-memstick.img of=/dev/da0 bs=64k + + If an Operation not + permitted error is displayed, make + certain that the target device is not in use, mounted, + or being automounted by another program. Then try + again. + + + + + Using &windows; to Write the Image + + + Make sure to use the correct drive letter as the + output target, as this command will overwrite and + destroy any existing data on the specified + device. + + + + Obtaining <application>Image Writer for + Windows</application> + + Image Writer for + Windows is a free application that can + correctly write an image file to a memory stick. + Download it from https://launchpad.net/win32-image-writer/ + and extract it into a folder. + + + + Writing the Image with Image Writer + + Double-click the + Win32DiskImager icon to + start the program. Verify that the drive letter shown + under Device is the + drive with the memory stick. Click the folder icon + and select the image to be written to the memory + stick. Click Save to accept + the image file name. 
Verify that everything is + correct, and that no folders on the memory stick are + open in other windows. Finally, click + Write to write the image file + to the drive. + + + + + + To create the boot floppy images for a &os;/&arch.pc98; + installation, follow these steps: + + + + Acquire the Boot Floppy Images + + The &os;/&arch.pc98; boot disks can be downloaded from + the floppies directory, + ftp://ftp.FreeBSD.org/pub/FreeBSD/releases/pc98/version-RELEASE/floppies/. + Replace version with the + version number to install. + + The floppy images have a .flp + extension. floppies/ contains a number + of different images. Download + boot.flp as well as the number of + files associated with the type of installation, such as + kern.small* or + kern*. + + + The FTP program must use binary + mode to download these disk images. Some + web browsers use text or + ASCII mode, which will be apparent + if the disks are not bootable. @@ -760,7 +835,7 @@ 若要用磁片安裝,請把在 - 一節中製作好的 + 一節中製作好的 boot.flp 那張安裝磁片放到第一台軟碟機中。 @@ -855,59 +930,6 @@ - - Alpha 平台的開機流程 - - Alpha - - - - 在一開始,電腦電源開關是關閉的。 - - - - 打開電腦電源開關,然後等開機畫面出現。 - - - - 若要用磁片安裝,請把在 - 一節中製作好的 - boot.flp 那張安裝磁片放到第一台軟碟機中。 - 然後,打下列指令來從磁片開機 - (請把下列軟碟機代號改為你電腦的軟碟機代號): - - >>>BOOT DVA0 -FLAGS '' -FILE '' - - 若要用光碟安裝,請把做好的安裝片放入光碟機, - 然後打下列指令來從光碟開機 - (請把下列光碟機代號改為你電腦的光碟機代號): - - >>>BOOT DKA0 -FLAGS '' -FILE '' - - - - 接著 FreeBSD 開機片就會開始了。若是由軟碟開機的話, - 這時會看到以下訊息: - - Insert disk labelled "Kernel floppy 1" and press any key... - - 請照指示,拿走 boot.flp 片,改放 - kern1.flp 片, - 然後按 Enter - - - - 無論從軟碟或光碟開機,您都會看到下面這段訊息: - - Hit [Enter] to boot immediately, or any other key for command prompt. -Booting [kernel] in 9 seconds... _ - - 您可以等待 10 秒,或是按 Enter 鍵。 - 接下來就會進入kernel configuration 選單。 - - - - &sparc64; 平台的開機流程 @@ -2346,7 +2368,7 @@ These services can be enabled after installation by editing /etc/inetd.conf with your favorite text editor. - See for more information. + See for more information. Select &gui.yes; if you wish to configure these services during install. An additional @@ -3969,7 +3991,7 @@ serial console. A serial console is basically using another machine to act as the main display and keyboard for a system. To do this, just follow the steps to create - installation floppies, explained in . + installation floppies, explained in . To modify these floppies to boot into a serial console, follow these steps: @@ -4106,112 +4128,156 @@ - Creating an Installation CDROM + Creating an Installation ISO - As part of each release, the FreeBSD project makes available at least two - CDROM images (ISO images) per supported architecture. These images can be written - (burned) to CDs if you have a CD writer, and then used - to install FreeBSD. If you have a CD writer, and bandwidth is cheap, - then this is the easiest way to install FreeBSD. + As part of each release, the &os; Project provides ISO + images for each supported architecture. These images can be + written (burned) to CD or DVD media using a + burning application, and then used to install &os;. If a + CD/DVD writer is available, this is the easiest way to install + &os;. Download the Correct ISO Images - The ISO images for each release can be downloaded from ftp://ftp.FreeBSD.org/pub/FreeBSD/ISO-IMAGES-arch/version or the closest mirror. - Substitute arch and + The ISO images for each release can be downloaded from + ftp://ftp.FreeBSD.org/pub/FreeBSD/ISO-IMAGES-arch/version + or the closest mirror. Substitute + arch and version as appropriate. 
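      As an illustration only, using the naming pattern above with
      the &arch.i386; architecture and the current release filled in
      (the exact file name depends on the image chosen from the table
      below), the download and an optional integrity check might look
      like the following; a CHECKSUM.SHA256 file
      normally accompanies each release and can be compared against
      the output of sha256:

      &prompt.user; fetch ftp://ftp.FreeBSD.org/pub/FreeBSD/ISO-IMAGES-&arch.i386;/&rel2.current;/&os;-&rel2.current;-RELEASE-&arch.i386;-disc1.iso
&prompt.user; sha256 &os;-&rel2.current;-RELEASE-&arch.i386;-disc1.iso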
- That directory will normally contain the following images: + An image directory normally contains the following + images: - FreeBSD 5.<replaceable>X</replaceable> and 6.<replaceable>X</replaceable> - ISO Image Names and Meanings + &os; + ISO Image Names and Meanings - 檔名 + Filename - 內容 + Contents - 版本-RELEASE-架構-bootonly.iso + &os;-version-RELEASE-arch-bootonly.iso + + This CD image starts the installation process + by booting from a CD-ROM drive but it does not + contain the support for installing &os; from the + CD itself. Perform a network based install, such + as from an FTP server, after booting from this + CD. + + + + &os;-version-RELEASE-arch-dvd1.iso.gz + + This DVD image contains everything necessary + to install the base &os; operating system, a + collection of pre-built packages, and the + documentation. It also supports booting into a + livefs based rescue mode. + + + + &os;-version-RELEASE-arch-memstick.img - Everything you need to boot into a FreeBSD - kernel and start the installation interface. - The installable files have to be pulled over FTP - or some other supported source. + This image can be written to a USB memory + stick in order to install machines capable of + booting from USB drives. It also supports booting + into a livefs based rescue mode. + The only included package is the documentation + package. - 版本-RELEASE-架構-disc1.iso + &os;-version-RELEASE-arch-disc1.iso - Everything you need to install &os; and a - live filesystem, which is used in - conjunction with the Repair facility - in sysinstall. + This image can be written to a USB memory + stick in order to install machines capable of + booting from USB drives. Similar to the + bootonly.iso image, it does + not contain the distribution sets on the medium + itself, but does support network-based + installations (for example, via ftp). - 版本-RELEASE-架構-disc2.iso + &os;-version-RELEASE-arch-disc1.iso - &os; 文件(&os; 6.2 之前的),以及許多 third-party - packages。 + This CD image contains the base &os; + operating system and the documentation package but + no other packages. - 版本-RELEASE-架構-docs.iso + &os;-version-RELEASE-arch-disc2.iso - &os; 文件(&os; 6.2 及之後)。 + A CD image with as many third-party packages + as would fit on the disc. This image is not + available for + &os; 9.X. + + + + &os;-version-RELEASE-arch-disc3.iso + + Another CD image with as many third-party + packages as would fit on the disc. This image is + not available for + &os; 9.X. + + + + &os;-version-RELEASE-arch-livefs.iso + + This CD image contains support for booting + into a livefs based rescue mode but + does not support doing an install from the CD + itself.
- You must download one of either the bootonly - ISO image (if available), or the image of disc one. Do not download - both of them, since the disc one image contains everything that the - bootonly ISO image contains. - - Use the bootonly ISO if Internet access is cheap for you. It will - let you install &os;, and you can then install third-party - packages by downloading them using the ports/packages system (see - ) as - necessary. - - Use the image of disc one if you want to install a &os; - release and want - a reasonable selection of third-party packages on the disc - as well. + When performing a CD installation, download either + the bootonly ISO image or + disc1. Do not download both, since + disc1 contains everything that the + bootonly ISO image contains. + + Use the bootonly ISO to perform a + network install over the Internet. Additional software + can be installed as needed using the Ports Collection as + described in . - The additional disc images are useful, but not essential, - especially if you have high-speed access to the Internet. + Use dvd1 to install &os; and a + selection of third-party packages from the disc.
- Write the CDs + Burn the Media - You must then write the CD images to disc. If you will be - doing this on another FreeBSD system then see - for more information (in - particular, and - ). - - If you will be doing this on another platform then you will - need to use whatever utilities exist to control your CD writer on - that platform. The images provided are in the standard ISO format, - which many CD writing applications support. + Next, write the downloaded image(s) to disc. If using + another &os; system, refer to + for instructions. + + If using another platform, use any burning utility + that exists for that platform. The images are in the + standard ISO format which most CD writing applications + support.
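      As a rough, hedged example rather than a recommendation (device
      names vary between machines, and the referenced chapter remains
      the authoritative source), an image could be written on an
      older &os; system with an ATAPI drive using
      burncd from the base system, or with
      cdrecord from the
      sysutils/cdrtools port for
      SCSI-style drives:

      &prompt.root; burncd -f /dev/acd0 data &os;-&rel2.current;-RELEASE-&arch.i386;-disc1.iso fixate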
- If you are interested in building a customized - release of FreeBSD, please see the Release Engineering - Article. - + To build a customized release of &os;, refer to the + Release Engineering + Article.
Index: zh_TW.UTF-8/books/handbook/jails/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/jails/chapter.xml +++ zh_TW.UTF-8/books/handbook/jails/chapter.xml @@ -524,8 +524,8 @@ 相關章節。 當更新完成之後,就要進行 buildworld 程序,此外還要裝 sysutils/cpdup 套件。 我們將用 &man.portsnap.8; 來下載 &os; Ports Collection, - 在 Handbook 中對 Portsnap 章節 - 中有相關介紹,初學者可以看看。 + 在 Handbook 中 + 有相關介紹,初學者可以看看。 Index: zh_TW.UTF-8/books/handbook/kernelconfig/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/kernelconfig/chapter.xml +++ zh_TW.UTF-8/books/handbook/kernelconfig/chapter.xml @@ -242,7 +242,7 @@ 一旦真的砍了之後,你可能幾秒之後才會醒悟到: 你同時也砍掉自己改的 kernel 設定檔。 此外,也不要直接修改 GENERIC,因為下次你 - 更新 source tree時, + 更新 source tree 時, 它會被新版覆蓋,而相關修改也將隨之而逝。 你也可考慮把 kernel 設定檔改放到其他地方,然後再到 @@ -275,7 +275,7 @@ 會循序漸進地介紹。 - 若有從 &os; 計劃去 更新你的 source tree 的話, + 若有從 &os; 計劃去更新你的 source tree 的話, 則切記在進行任何升級之前,務必要察看 /usr/src/UPDATING。 這檔會介紹在更新過程中的重大議題或要注意的事項。 Index: zh_TW.UTF-8/books/handbook/mirrors/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/mirrors/chapter.xml +++ zh_TW.UTF-8/books/handbook/mirrors/chapter.xml @@ -1,3147 +1,882 @@ - - 取得 FreeBSD 的方式 + + 取得 &os; 的方式 - CDROM 及 DVD 發行商 + <acronym>CD</acronym> 及 + <acronym>DVD</acronym> 合集 - - 盒裝產品的零售處: - - FreeBSD 盒裝產品(含 FreeBSD 光碟及其他一些軟體、書面文件)的零售業者: - - - -
- CompUSA - WWW: http://www.compusa.com/ -
-
- - -
- Frys Electronics - WWW: http://www.frys.com/ -
-
-
-
- - - CD 及 DVD 合集 - - FreeBSD 光碟(CD 及 DVD)的網路零售業者: + &os; CDDVD 合集 + 可以從這些網路零售商取得: -
- BSD Mall by Daemon News - PO Box 161 - Nauvoo, IL 62354 - USA - Phone: +1 866 273-6255 - Fax: +1 217 453-9956 - Email: sales@bsdmall.com - WWW: http://www.bsdmall.com/freebsd1.html -
-
- - -
- BSD-Systems - Email: info@bsd-systems.co.uk - WWW: http://www.bsd-systems.co.uk -
-
- - -
- FreeBSD Mall, Inc. - 3623 Sanford Street - Concord, CA 94520-1405 +
&os; Mall, Inc. + 2420 Sand Creek Rd C-1 #347 + Brentwood, CA + 94513 USA - Phone: +1 925 674-0783 + Phone: +1 925 240-6652 Fax: +1 925 674-0821 Email: info@freebsdmall.com - WWW: http://www.freebsdmall.com/ -
- - - -
- Hinner EDV - St. Augustinus-Str. 10 - D-81825 München - Germany - Phone: (089) 428 419 - WWW: http://www.hinner.de/linux/freebsd.html -
-
- - -
- Ikarios - 22-24 rue Voltaire - 92000 Nanterre - France - WWW: http://ikarios.com/form/#freebsd -
-
- - -
- JMC Software - Ireland - Phone: 353 1 6291282 - WWW: http://www.thelinuxmall.com -
-
- - -
- Linux CD Mall - Private Bag MBE N348 - Auckland 1030 - New Zealand - Phone: +64 21 866529 - WWW: http://www.linuxcdmall.co.nz/ + WWW: http://www.freebsdmall.com/
-
- The Linux Emporium - Hilliard House, Lester Way - Wallingford - OX10 9TA - United Kingdom - Phone: +44 1491 837010 - Fax: +44 1491 837016 - WWW: http://www.linuxemporium.co.uk/products/freebsd/ -
-
- - -
- Linux+ DVD Magazine - Lewartowskiego 6 - Warsaw - 00-190 - Poland - Phone: +48 22 860 18 18 - Email: editors@lpmagazine.org - WWW: http://www.lpmagazine.org/ +
Getlinux + 78 Rue de la Croix Rochopt + Épinay-sous-Sénart + 91860 + France + Email: contact@getlinux.fr + WWW: http://www.getlinux.fr/
-
- Linux System Labs Australia - 21 Ray Drive - Balwyn North - VIC - 3104 - Australia - Phone: +61 3 9857 5918 - Fax: +61 3 9857 8974 - WWW: http://www.lsl.com.au -
-
- - -
- LinuxCenter.Ru - Galernaya Street, 55 - Saint-Petersburg - 190000 - Russia - Phone: +7-812-3125208 - Email: info@linuxcenter.ru - WWW: http://linuxcenter.ru/freebsd +
Dr. Hinner EDV + Kochelseestr. 11 + D-81371 München + Germany + Phone: (0177) 428 419 0 + Email: infow@hinner.de + WWW: http://www.hinner.de/linux/freebsd.html
- - - - - 經銷商(Distributors) - - 若你是區域經銷商,並想代理經銷 FreeBSD 光碟產品的話,請與下列的代理商聯繫: - - - -
- Cylogistics - 809B Cuesta Dr., #2149 - Mountain View, CA 94040 - USA - Phone: +1 650 694-4949 - Fax: +1 650 694-4953 - Email: sales@cylogistics.com - WWW: http://www.cylogistics.com/ -
-
- - -
- Ingram Micro - 1600 E. St. Andrew Place - Santa Ana, CA 92705-4926 - USA - Phone: 1 (800) 456-8000 - WWW: http://www.ingrammicro.com/ -
-
- - -
- Kudzu, LLC - 7375 Washington Ave. S. - Edina, MN 55439 - USA - Phone: +1 952 947-0822 - Fax: +1 952 947-0876 - Email: sales@kudzuenterprises.com -
-
- + - - - cvs-all release=cvs - - - The main FreeBSD CVS repository, including the - cryptography code. - - - - distrib release=cvs - - - Files related to the distribution and mirroring - of FreeBSD. - - - - - doc-all release=cvs - - Sources for the FreeBSD Handbook and other - documentation. This does not include files for - the FreeBSD web site. - - - - - ports-all release=cvs - - - The FreeBSD Ports Collection. - - - If you do not want to update the whole of - ports-all (the whole ports tree), - but use one of the subcollections listed below, - make sure that you always update - the ports-base subcollection! - Whenever something changes in the ports build - infrastructure represented by - ports-base, it is virtually certain - that those changes will be used by real - ports real soon. Thus, if you only update the - real ports and they use some of the new - features, there is a very high chance that their build - will fail with some mysterious error message. The - very first thing to do in this - case is to make sure that your - ports-base subcollection is up to - date. - - - - If you are going to be building your own local - copy of ports/INDEX, you - must accept - ports-all (the whole ports tree). - Building ports/INDEX with - a partial tree is not supported. See the - - FAQ. - - - - - ports-accessibility - release=cvs - - - Software to help disabled users. - - - - - ports-arabic - release=cvs - - - Arabic language support. - - - - - ports-archivers - release=cvs - - - Archiving tools. - - - - - ports-astro - release=cvs - - - Astronomical ports. - - - - - ports-audio - release=cvs - - - Sound support. - - - - - ports-base - release=cvs - - - The Ports Collection build infrastructure - - various files located in the - Mk/ and - Tools/ subdirectories of - /usr/ports. - - - Please see the important - warning above: you should - always update this - subcollection, whenever you update any part of - the FreeBSD Ports Collection! - - - - - - ports-benchmarks - release=cvs - - - Benchmarks. - - - - - ports-biology - release=cvs - - - Biology. - - - - - ports-cad - release=cvs - - - Computer aided design tools. - - - - - ports-chinese - release=cvs - - - Chinese language support. - - - - - ports-comms - release=cvs - - - Communication software. - - - - - ports-converters - release=cvs - - - character code converters. - - - - - ports-databases - release=cvs - - - Databases. - - - - - ports-deskutils - release=cvs - - - Things that used to be on the desktop - before computers were invented. - - - - - ports-devel - release=cvs - - - Development utilities. - - - - - ports-dns - release=cvs - - - DNS related software. - - - - - ports-editors - release=cvs - - - Editors. - - - - - ports-emulators - release=cvs - - - Emulators for other operating - systems. - - - - - ports-finance - release=cvs - - - Monetary, financial and related applications. - - - - - ports-ftp - release=cvs - - - FTP client and server utilities. - - - - - ports-games - release=cvs - - - Games. - - - - - ports-german - release=cvs - - - German language support. - - - - - ports-graphics - release=cvs - - - Graphics utilities. - - - - - ports-hebrew - release=cvs - - - Hebrew language support. - - - - - ports-hungarian - release=cvs - - - Hungarian language support. - - - - - ports-irc - release=cvs - - - Internet Relay Chat utilities. - - - - - ports-japanese - release=cvs - - - Japanese language support. - - - - - ports-java - release=cvs - - - &java; utilities. - - - - - ports-korean - release=cvs - - - Korean language support. 
- - - - - ports-lang - release=cvs - - - Programming languages. - - - - - ports-mail - release=cvs - - - Mail software. - - - - - ports-math - release=cvs - - - Numerical computation software. - - - - - ports-mbone - release=cvs - - - MBone applications. - - - - - ports-misc - release=cvs - - - Miscellaneous utilities. - - - - - ports-multimedia - release=cvs - - - Multimedia software. - - - - - ports-net - release=cvs - - - Networking software. - - - - - ports-net-im - release=cvs - - - Instant messaging software. - - - - - ports-net-mgmt - release=cvs - - - Network management software. - - - - - ports-net-p2p - release=cvs - - - Peer to peer networking. - - - - - ports-news - release=cvs - - - USENET news software. - - - - - ports-palm - release=cvs - - - Software support for Palm - series. - - - - - ports-polish - release=cvs - - - Polish language support. - - - - - ports-portuguese - release=cvs - - - Portuguese language support. - - - - - ports-print - release=cvs - - - Printing software. - - - - - ports-russian - release=cvs - - - Russian language support. - - - - - ports-science - release=cvs - - - Science. - - - - - ports-security - release=cvs - - - Security utilities. - - - - - ports-shells - release=cvs - - - Command line shells. - - - - - ports-sysutils - release=cvs - - - System utilities. - - - - - ports-textproc - release=cvs - - - text processing utilities (does not - include desktop publishing). - - - - - ports-ukrainian - release=cvs - - - Ukrainian language support. - - - - - ports-vietnamese - release=cvs - - - Vietnamese language support. - - - - - ports-www - release=cvs - - - Software related to the World Wide - Web. - - - - - ports-x11 - release=cvs - - - Ports to support the X window - system. - - - - - ports-x11-clocks - release=cvs - - - X11 clocks. - - - - - ports-x11-fm - release=cvs - - - X11 file managers. - - - - - ports-x11-fonts - release=cvs - - - X11 fonts and font utilities. - - - - - ports-x11-toolkits - release=cvs - - - X11 toolkits. - - - - - ports-x11-servers - release=cvs - - - X11 servers. - - - - - ports-x11-themes - release=cvs - - - X11 themes. - - - - - ports-x11-wm - release=cvs - - - X11 window managers. - - - - - - - - projects-all release=cvs - - Sources for the FreeBSD projects repository. - - - - - src-all release=cvs - - - The main FreeBSD sources, including the - cryptography code. - - - - src-base - release=cvs - - - Miscellaneous files at the top of - /usr/src. - - - - - src-bin - release=cvs - - - User utilities that may be needed in - single-user mode - (/usr/src/bin). - - - - - src-contrib - release=cvs - - - Utilities and libraries from outside the - FreeBSD project, used relatively unmodified - (/usr/src/contrib). - - - - - src-crypto release=cvs - - - Cryptography utilities and libraries from - outside the FreeBSD project, used relatively - unmodified - (/usr/src/crypto). - - - - - src-eBones release=cvs - - - Kerberos and DES - (/usr/src/eBones). Not - used in current releases of FreeBSD. - - - - - src-etc - release=cvs - - - System configuration files - (/usr/src/etc). - - - - - src-games - release=cvs - - - Games - (/usr/src/games). - - - - - src-gnu - release=cvs - - - Utilities covered by the GNU Public - License (/usr/src/gnu). - - - - - src-include - release=cvs - - - Header files - (/usr/src/include). - - - - - src-kerberos5 - release=cvs - - - Kerberos5 security package - (/usr/src/kerberos5). - - - - - src-kerberosIV - release=cvs - - - KerberosIV security package - (/usr/src/kerberosIV). 
- - - - - src-lib - release=cvs - - - Libraries - (/usr/src/lib). - - - - - src-libexec - release=cvs - - - System programs normally executed by other - programs - (/usr/src/libexec). - - - - - src-release - release=cvs - - - Files required to produce a FreeBSD - release - (/usr/src/release). - - - - - src-sbin release=cvs - - - System utilities for single-user mode - (/usr/src/sbin). - - - - - src-secure - release=cvs - - - Cryptographic libraries and commands - (/usr/src/secure). - - - - - src-share - release=cvs - - - Files that can be shared across multiple - systems - (/usr/src/share). - - - - - src-sys - release=cvs - - - The kernel - (/usr/src/sys). - - - - - src-sys-crypto - release=cvs - - - Kernel cryptography code - (/usr/src/sys/crypto). - - - - - src-tools - release=cvs - - - Various tools for the maintenance of - FreeBSD - (/usr/src/tools). - - - - - src-usrbin - release=cvs - - - User utilities - (/usr/src/usr.bin). - - - - - src-usrsbin - release=cvs - - - System utilities - (/usr/src/usr.sbin). - - - - - - - - www release=cvs - - - The sources for the FreeBSD WWW site. - - - - - - - - distrib release=self - - - The CVSup server's own - configuration files. Used by CVSup - mirror sites. - - - - - gnats release=current - - - The GNATS bug-tracking database. - - - - - mail-archive release=current - - - FreeBSD mailing list archive. - - - - - www release=current - - - The pre-processed FreeBSD WWW site files (not the - source files). Used by WWW mirror sites. - - - -
- - - For More Information - - For the CVSup FAQ and other - information about CVSup, see - The - CVSup Home Page. - - Most FreeBSD-related discussion of - CVSup takes place on the - &a.hackers;. New versions of the software are announced there, - as well as on the &a.announce;. - - Questions and bug reports should be addressed to the author - of the program at cvsup-bugs@polstra.com. - - - - CVSup Sites - - CVSup servers for FreeBSD are running - at the following sites: - - &chap.mirrors.cvsup.index.inc; - - &chap.mirrors.lastmod.inc; - - &chap.mirrors.cvsup.inc; - - - - - Using Portsnap - - - Portsnap 簡介 - - Portsnap is a system for securely - distributing the &os; ports tree. Approximately once an hour, - a snapshot of the ports tree is generated, - repackaged, and cryptographically signed. The resulting files - are then distributed via HTTP. - - Like CVSup, - Portsnap uses a - pull model of updating: The packaged and - signed ports trees are placed on a web server which waits - passively for clients to request files. Users must either run - &man.portsnap.8; manually to download updates - or set up a &man.cron.8; job to download updates - automatically on a regular basis. - - For technical reasons, Portsnap - does not update the live ports tree in - /usr/ports/ directly; instead, it works - via a compressed copy of the ports tree stored in - /var/db/portsnap/ by default. This - compressed copy is then used to update the live ports tree. - - - If Portsnap is installed from - the &os; Ports Collection, then the default location for its - compressed snapshot will be /usr/local/portsnap/ - instead of /var/db/portsnap/. - - - - - Installation - - On &os; 6.0 and more recent versions, - Portsnap is contained in the &os; - base system. On older versions of &os;, it can be installed - using the sysutils/portsnap - port. - - - - Portsnap Configuration - - Portsnap's operation is controlled - by the /etc/portsnap.conf configuration - file. For most users, the default configuration file will - suffice; for more details, consult the &man.portsnap.conf.5; - manual page. - - - If Portsnap is installed from - the &os; Ports Collection, it will use the configuration file - /usr/local/etc/portsnap.conf instead of - /etc/portsnap.conf. This configuration - file is not created when the port is installed, but a sample - configuration file is distributed; to copy it into place, run - the following command: - - &prompt.root; cd /usr/local/etc && cp portsnap.conf.sample portsnap.conf - - - - - Running <application>Portsnap</application> for the First - Time - - The first time &man.portsnap.8; is run, - it will need to download a compressed snapshot of the entire - ports tree into /var/db/portsnap/ (or - /usr/local/portsnap/ if - Portsnap was installed from the - Ports Collection). For the beginning of 2006 this is approximately a 41 MB - download. - - &prompt.root; portsnap fetch - - Once the compressed snapshot has been downloaded, a - live copy of the ports tree can be extracted into - /usr/ports/. This is necessary even if a - ports tree has already been created in that directory (e.g., by - using CVSup), since it establishes a - baseline from which portsnap can - determine which parts of the ports tree need to be updated - later. - - &prompt.root; portsnap extract - - - In the default installation - /usr/ports is not - created. If you run &os; 6.0-RELEASE, it should be created before - portsnap is used. 
On more recent - versions of &os; or Portsnap, - this operation will be done automatically at first use - of the portsnap command. - - - - - Updating the Ports Tree - - After an initial compressed snapshot of the ports tree has - been downloaded and extracted into /usr/ports/, - updating the ports tree consists of two steps: - fetching updates to the compressed - snapshot, and using them to update the - live ports tree. These two steps can be specified to - portsnap as a single command: - - &prompt.root; portsnap fetch update - - - Some older versions of portsnap - do not support this syntax; if it fails, try instead the - following: - - &prompt.root; portsnap fetch -&prompt.root; portsnap update - - - - - Running Portsnap from cron - - In order to avoid problems with flash crowds - accessing the Portsnap servers, - portsnap fetch will not run from - a &man.cron.8; job. Instead, a special - portsnap cron command exists, which - waits for a random duration up to 3600 seconds before fetching - updates. - - In addition, it is strongly recommended that - portsnap update not be run from a - cron job, since it is liable to cause - major problems if it happens to run at the same time as a port - is being built or installed. However, it is safe to update - the ports' INDEX files, and this can be done by passing the - flag to - portsnap. (Obviously, if - portsnap -I update is run from - cron, then it will be necessary to run - portsnap update without the - flag at a later time in order to update the rest of the tree.) - - Adding the following line to /etc/crontab - will cause portsnap to update its - compressed snapshot and the INDEX files in - /usr/ports/, and will send an email if any - installed ports are out of date: - - 0 3 * * * root portsnap -I cron update && pkg_version -vIL= - - - If the system clock is not set to the local time zone, - please replace 3 with a random - value between 0 and 23, in order to spread the load on the - Portsnap servers more evenly. - - - Some older versions of portsnap - do not support listing multiple commands (e.g., cron update) - in the same invocation of portsnap. If - the line above fails, try replacing - portsnap -I cron update with - portsnap cron && portsnap -I update. - - - - - - CVS Tags - - When obtaining or updating sources using - cvs or - CVSup, a revision tag must be specified. - A revision tag refers to either a particular line of &os; - development, or a specific point in time. The first type are called - branch tags, and the second type are called - release tags. - - - Branch Tags - - All of these, with the exception of HEAD (which - is always a valid tag), only apply to the src/ - tree. The ports/, doc/, and - www/ trees are not branched. - - - - HEAD - - - Symbolic name for the main line, or FreeBSD-CURRENT. - Also the default when no revision is specified. - - In CVSup, this tag is represented - by a . (not punctuation, but a literal - . character). - - - In CVS, this is the default when no revision tag is - specified. It is usually not - a good idea to checkout or update to CURRENT sources - on a STABLE machine, unless that is your intent. - - - - - - RELENG_6 - - - The line of development for FreeBSD-6.X, also known - as FreeBSD 6-STABLE - - - - - RELENG_6_1 - - - The release branch for FreeBSD-6.1, used only for - security advisories and other critical fixes. - - - - - RELENG_6_0 - - - The release branch for FreeBSD-6.0, used only for - security advisories and other critical fixes. 
- - - - - RELENG_5 - - - The line of development for FreeBSD-5.X, also known - as FreeBSD 5-STABLE. - - - - - RELENG_5_5 - - - The release branch for FreeBSD-5.5, used only - for security advisories and other critical fixes. - - - - - RELENG_5_4 - - - The release branch for FreeBSD-5.4, used only - for security advisories and other critical fixes. - - - - - RELENG_5_3 - - - The release branch for FreeBSD-5.3, used only - for security advisories and other critical fixes. - - - - - RELENG_5_2 - - - The release branch for FreeBSD-5.2 and FreeBSD-5.2.1, used only - for security advisories and other critical fixes. - - - - - RELENG_5_1 - - - The release branch for FreeBSD-5.1, used only - for security advisories and other critical fixes. - - - - - RELENG_5_0 - - - The release branch for FreeBSD-5.0, used only - for security advisories and other critical fixes. - - - - - RELENG_4 - - - The line of development for FreeBSD-4.X, also known - as FreeBSD 4-STABLE. - - - - - RELENG_4_11 - - - The release branch for FreeBSD-4.11, used only - for security advisories and other critical fixes. - - - - - RELENG_4_10 - - - The release branch for FreeBSD-4.10, used only - for security advisories and other critical fixes. - - - - - RELENG_4_9 - - - The release branch for FreeBSD-4.9, used only - for security advisories and other critical fixes. - - - - - RELENG_4_8 - - - The release branch for FreeBSD-4.8, used only - for security advisories and other critical fixes. - - - - - RELENG_4_7 - - - The release branch for FreeBSD-4.7, used only - for security advisories and other critical fixes. - - - - - RELENG_4_6 - - - The release branch for FreeBSD-4.6 and FreeBSD-4.6.2, - used only for security advisories and other - critical fixes. - - - - - RELENG_4_5 - - - The release branch for FreeBSD-4.5, used only - for security advisories and other critical fixes. - - - - - RELENG_4_4 - - - The release branch for FreeBSD-4.4, used only - for security advisories and other critical fixes. - - - - - RELENG_4_3 - - - The release branch for FreeBSD-4.3, used only - for security advisories and other critical fixes. - - - - - RELENG_3 - - - The line of development for FreeBSD-3.X, also known - as 3.X-STABLE. - - - - - RELENG_2_2 - - - The line of development for FreeBSD-2.2.X, also known - as 2.2-STABLE. This branch is mostly obsolete. - - - - - - - Release Tags - - These tags refer to a specific point in time when a particular - version of &os; was released. The release engineering process is - documented in more detail by the - Release Engineering - Information and - Release - Process documents. - The src tree uses tag names that - start with RELENG_ tags. - The ports and - doc trees use tags whose names - begin with RELEASE tags. - Finally, the www tree is not - tagged with any special name for releases. - - - - RELENG_6_1_0_RELEASE - - - FreeBSD 6.1 - - - - - RELENG_6_0_0_RELEASE - - - FreeBSD 6.0 - - - - - RELENG_5_5_0_RELEASE - - - FreeBSD 5.5 - - - - - RELENG_5_4_0_RELEASE - - - FreeBSD 5.4 - - - - - RELENG_4_11_0_RELEASE + +
Linux Center + Galernaya Street, 55 + Saint-Petersburg + 190000 + Russia + Phone: +7-812-309-06-86 + Email: info@linuxcenter.ru + WWW: http://linuxcenter.ru/shop/freebsd +
+
+ +
- - FreeBSD 4.11 - - + + <acronym>FTP</acronym> 站 - - RELENG_5_3_0_RELEASE + The official sources for &os; are available via anonymous + FTP from a worldwide set of mirror sites. + The site ftp://ftp.FreeBSD.org/pub/FreeBSD/ + is well connected and allows a large number of connections to + it, but you are probably better off finding a + closer mirror site (especially if you decide to + set up some sort of mirror site). + + Additionally, &os; is available via anonymous + FTP from the following mirror sites. If you + choose to obtain &os; via anonymous FTP, + please try to use a site near you. The mirror sites listed as + Primary Mirror Sites typically have the entire + &os; archive (all the currently available versions for each of + the architectures) but you will probably have faster download + times from a site that is in your country or region. The + regional sites carry the most recent versions for the most + popular architecture(s) but might not carry the entire &os; + archive. All sites provide access via anonymous + FTP but some sites also provide access via + other methods. The access methods available for each site are + provided in parentheses after the hostname. - - FreeBSD 5.3 - - + &chap.mirrors.ftp.index.inc; - - RELENG_4_10_0_RELEASE + &chap.mirrors.lastmod.inc; - - FreeBSD 4.10 - - + &chap.mirrors.ftp.inc; + - - RELENG_5_2_1_RELEASE + + Using CTM - - FreeBSD 5.2.1 - - + + CTM + + + CTM is a method for keeping a + remote directory tree in sync with a central one. It is built + into &os; and can be used to synchronize a system with &os;'s + source repositories. It supports synchronization of an entire + repository or just a specified set of branches. + + CTM is specifically designed for + use on lousy or non-existent TCP/IP connections and provides + the ability for changes to be automatically sent by email. It + requires the user to obtain up to three deltas per day for the + most active branches. Update sizes are always kept as small as + possible and are typically less than 5K. About one in very ten + updates is 10-50K in size, and there will occasionally be an + update larger than 100K+. + + When using CTM to track &os; + development, refer to the caveats related to working directly + from the development sources rather than a pre-packaged release. + These are discussed in Tracking + a Development Branch. + + Little documentation exists on the process of creating + deltas or using CTM for other + purposes. Contact the &a.ctm-users.name; mailing list for + answers to questions on using + CTM. - - RELENG_5_2_0_RELEASE + + Getting Deltas - - FreeBSD 5.2 - - + The deltas used by + CTM can be obtained either through + anonymous FTP or email. + + FTP deltas can be obtained from the + following mirror sites. When using anonymous + FTP to obtain + CTM deltas, select a mirror that is + geographically nearby. In case of problems, contact the + &a.ctm-users.name; mailing list. - - RELENG_4_9_0_RELEASE + + + California, Bay Area, official source - - FreeBSD 4.9 - - + + + + ftp://ftp.FreeBSD.org/pub/FreeBSD/development/CTM/ + + + ftp://ftp.FreeBSD.org/pub/FreeBSD/CTM/ + + + + - - RELENG_5_1_0_RELEASE + + South Africa, backup server for old deltas - - FreeBSD 5.1 - - + + + + ftp://ftp.za.FreeBSD.org/pub/FreeBSD/CTM/ + + + + - - RELENG_4_8_0_RELEASE + + Taiwan/R.O.C. 
- - FreeBSD 4.8 - - + + + + ftp://ctm.tw.FreeBSD.org/pub/FreeBSD/development/CTM/ + + + + ftp://ctm2.tw.FreeBSD.org/pub/FreeBSD/development/CTM/ + + + + ftp://ctm3.tw.FreeBSD.org/pub/FreeBSD/development/CTM/ + + + + + - - RELENG_5_0_0_RELEASE + To instead receive deltas through email, subscribe to one + of the ctm-src distribution lists available + from http://lists.freebsd.org/mailman/listinfo. + For example, &a.ctm-src-cur.name; supports the head + development branch and &a.ctm-src-9.name; supports the 9.X + release branch. + + As CTM updates arrive through + email, use ctm_rmail to unpack and apply + them. This command can be run directly from an entry in + /etc/aliases in order to automate this + process. Refer to &man.ctm.rmail.1; for more details. - - FreeBSD 5.0 - - + + Regardless of the method which is used to get deltas, + CTM users should subscribe + to the &a.ctm-announce.name; mailing list as this is the + only mechanism by which CTM + announcements are posted. + + - - RELENG_4_7_0_RELEASE + + <application>CTM</application> Usage - - FreeBSD 4.7 - - + Before CTM deltas can be used + for the first time, a starting point must be produced. - - RELENG_4_6_2_RELEASE + One method is to apply a starter delta to + an empty directory. A starter delta can be recognized by the + XEmpty in its name, such as + src-cur.3210XEmpty.gz. The designation + following the X corresponds to the origin + of the initial seed, where + Empty is an empty directory. As a rule, + a base transition from Empty is produced + every 100 deltas. Be aware that starter deltas are large and + 70 to 80 Megabytes of gzip'd data is common + for the XEmpty deltas. + + Another method is to copy or extract an initial source + from a RELEASE media as this can save a significant transfer + of data from the Internet. + + Once a base delta has been created, apply all deltas with + higher numbers. To apply the deltas: + + &prompt.root; cd /directory/to/store/the/stuff +&prompt.root; ctm -v -v /directory/which/stores/the/deltas/src-xxx.* + + Multiple deltas can be applied with a single command as + they will be processed one at a time and any deltas that are + already applied will be ignored. + CTM understands + gzip compressed deltas, which saves disk + space. + + To verify a delta without applying it, include + in the command line. + CTM will not actually modify the + local tree but will instead verify the integrity of the delta + to see if it would apply cleanly. Refer to &man.ctm.1; for + more information about available options and an overview of + the process CTM uses when applying + deltas. + + To keep the local source tree up-to-date, every time a + new delta becomes available, apply it through + CTM. + + Once applied, it is recommended to not delete the deltas + if it is a burden to download them again. This way, a local + copy is available in case it is needed for future disaster + recovery. + - - FreeBSD 4.6.2 - - + + Keeping Local Changes - - RELENG_4_6_1_RELEASE + Developers often experiment with and + change files in their local source tree. + CTM supports local modifications in + a limited way: before checking for the presence of a file, + it first looks for a file with the same name and a + .ctm extension. If this file exists, + CTM will operate on it instead of + the original filename. + + This behavior provides a simple way to maintain local + changes. Before modifying a file, make a copy with a + .ctm suffix. 
Make any changes to the + original filename, knowing that + CTM will only apply updates to the + file with the .ctm suffix. + - - FreeBSD 4.6.1 - - + + Other <application>CTM</application> Options - - RELENG_4_6_0_RELEASE + + + Finding Out Exactly What Would Be Touched by an + Update - - FreeBSD 4.6 - - + + To determine the list of changes that + CTM will make to the local + source repository, use . This option + is useful for creating logs of the changes or when + performing pre- or post-processing on any of the + modified files. + + - - RELENG_4_5_0_RELEASE + + Making Backups Before Updating - - FreeBSD 4.5 - - + + To backup all of the files that would be changed by + a CTM update, specify + . This + option tells CTM to backup + all files touched by the applied + CTM delta to + backup-file. + + - - RELENG_4_4_0_RELEASE + + Restricting the Files Touched by an Update - - FreeBSD 4.4 - - + + To restrict the scope of a given + CTM update, or to extract + just a few files from a sequence of deltas, filtering + regular expressions can be specified using + , which specifies which files to + process, or , which specifies which + files to ignore. + + For example, to extract an up-to-date copy of + lib/libc/Makefile from a collection + of saved CTM deltas: + + &prompt.root; cd /directory/to/extract/to/ +&prompt.root; ctm -e '^lib/libc/Makefile' /directory/which/stores/the/deltas/src-xxx.* + + For every file specified in a + CTM delta, + and are + applied in the order given on the command line. A file + is processed by CTM only if + it is marked as eligible after all + and options are applied. + + + + + + - - FreeBSD 4.1.1 - - + + Using <application>Subversion</application> - - RELENG_4_1_0_RELEASE + + Subversion + + + + Introduction + + As of July 2012, &os; uses + Subversion as the primary version + control system for storing all of &os;'s source code, + documentation, and the Ports Collection. - - FreeBSD 4.1 - - + + Subversion is generally a + developer tool. Most users should use + freebsd-update () to update + the &os; base system, and portsnap () to update the &os; Ports + Collection. + - - RELENG_4_0_0_RELEASE + This chapter demonstrates how to install + Subversion on a &os; system and + then use it to create a local copy of a &os; repository. It + includes a list of the available &os; + Subversion mirrors and resources to + additional information on how to use + Subversion. + - - FreeBSD 4.0 - - + + Installation - - RELENG_3_5_0_RELEASE + Subversion must be installed + before it can be used to check out the contents of any of the + repositories. If a copy of the ports tree is already present, + one can install Subversion like + this: - - FreeBSD-3.5 - - + &prompt.root; cd /usr/ports/devel/subversion +&prompt.root; make install clean - - RELENG_3_4_0_RELEASE + If the ports tree is not available, + Subversion can be installed as a + package: - - FreeBSD-3.4 - - + &prompt.root; pkg install devel/subversion + - - RELENG_3_3_0_RELEASE + + Running <application>Subversion</application> - - FreeBSD-3.3 - - + The svn command is used to fetch a + clean copy of the sources into a local directory. The files + in this directory are called a local working + copy. - - RELENG_3_2_0_RELEASE + + Move or delete the local directory before + using checkout. - - FreeBSD-3.2 - - + Checkout over an existing + non-svn directory can cause conflicts + between the existing files and those brought in from the + repository. 
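      One informal way to tell whether an existing directory was
      produced by svn (shown here only as a
      suggestion, with /usr/ports as the example
      path) is to query it directly; a working copy prints its
      repository details, while any other directory produces a
      not a working copy error and should be renamed
      or removed before the checkout:

      &prompt.root; svn info /usr/ports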
+ - - RELENG_3_1_0_RELEASE + Subversion uses + URLs to designate a repository, taking the + form of protocol://hostname/path. + Mirrors may support different protocols as specified below. + The first component of the path is the &os; repository to + access. There are three different repositories, + base for the &os; base system source code, + ports for the Ports Collection, and + doc for documentation. For example, the + URL + svn://svn0.us-east.FreeBSD.org/ports/head/ + specifies the main branch of the ports repository on the + svn0.us-east.FreeBSD.org + mirror, using the svn protocol. - - FreeBSD-3.1 - - + A checkout from a given repository is performed with a + command like this: - - RELENG_3_0_0_RELEASE + &prompt.root; svn checkout svn-mirror/repository/branch lwcdir + + where: + - FreeBSD-3.0 + svn-mirror is a URL for one + of the Subversion mirror + sites. - - - - RELENG_2_2_8_RELEASE - FreeBSD-2.2.8 + repository is one of the + Project repositories, i.e., base, + ports, or + doc. - - - - RELENG_2_2_7_RELEASE - FreeBSD-2.2.7 + branch depends on the + repository used. ports and + doc are mostly updated in the + head branch, while + base maintains the latest version of + -CURRENT under head and the respective + latest versions of the -STABLE branches under + stable/8 (for + 8.x), + stable/9 + (9.x) and + stable/10 + (10.x). - - - - RELENG_2_2_6_RELEASE - FreeBSD-2.2.6 + lwcdir is the target + directory where the contents of the specified branch + should be placed. This is usually + /usr/ports for + ports, + /usr/src for + base, and + /usr/doc for + doc. - + - - RELENG_2_2_5_RELEASE + This example checks out the Ports Collection from the + western US repository using the HTTPS + protocol, placing the local working copy in + /usr/ports. If + /usr/ports is already + present but was not created by svn, + remember to rename or delete it before the checkout. + + &prompt.root; svn checkout https://svn0.us-west.FreeBSD.org/ports/head /usr/ports + + Because the initial checkout has to download the full + branch of the remote repository, it can take a while. Please + be patient. + + After the initial checkout, the local working copy can be + updated by running: + + &prompt.root; svn update lwcdir + + To update + /usr/ports created in + the example above, use: + + &prompt.root; svn update /usr/ports + + The update is much quicker than a checkout, only + transferring files that have changed. + + An alternate way of updating the local working copy after + checkout is provided by the Makefile in + the /usr/ports, + /usr/src, and + /usr/doc directories. + Set SVN_UPDATE and use the + update target. For example, to + update /usr/src: + + &prompt.root; cd /usr/src +&prompt.root; make update SVN_UPDATE=yes + + + + <application>Subversion</application> Mirror + Sites - - FreeBSD-2.2.5 - - + + Subversion Repository + Mirror Sites + - - RELENG_2_2_2_RELEASE + All mirrors carry all repositories. - - FreeBSD-2.2.2 - - + The master &os; Subversion + server, svn.FreeBSD.org, is + publicly accessible, read-only. That may change in the + future, so users are encouraged to use one of the official + mirrors. To view the &os; + Subversion repositories through a + browser, use http://svnweb.FreeBSD.org/. - - RELENG_2_2_1_RELEASE + + The &os; Subversion mirror + network is still in its early days, and will likely change. + Do not count on this list of mirrors being static. In + particular, the SSL certificates of the + servers will likely change at some point. 
+ - - FreeBSD-2.2.1 - - + + + + + + + + + Name + + Protocols + + Location + + SSL Fingerprint + + + + + + svn0.us-west.FreeBSD.org + + svn, http, + https + + USA, California + + SHA1 + 1C:BD:85:95:11:9F:EB:75:A5:4B:C8:A3:FE:08:E4:02:73:06:1E:61 + + + + svn0.us-east.FreeBSD.org + + svn, http, + https, + rsync + + USA, New Jersey + + SHA1 + 1C:BD:85:95:11:9F:EB:75:A5:4B:C8:A3:FE:08:E4:02:73:06:1E:61 + + + + svn0.eu.FreeBSD.org + + svn, http, + https, + rsync + + Europe, UK + + SHA1 + 39:B0:53:35:CE:60:C7:BB:00:54:96:96:71:10:94:BB:CE:1C:07:A7 + + + + svn0.ru.FreeBSD.org + + svn, http, + https, + rsync + + Russia, Moscow + + SHA1 + F6:44:AA:B9:03:89:0E:3E:8C:4D:4D:14:F0:27:E6:C7:C1:8B:17:C5 + + + + + + HTTPS is the preferred protocol, + providing protection against another computer pretending to be + the &os; mirror (commonly known as a man in the + middle attack) or otherwise trying to send bad + content to the end user. + + On the first connection + to an HTTPS mirror, the user will be asked + to verify the server fingerprint: + + Error validating server certificate for 'https://svn0.us-west.freebsd.org:443': + - The certificate is not issued by a trusted authority. Use the + fingerprint to validate the certificate manually! + - The certificate hostname does not match. +Certificate information: + - Hostname: svnmir.ysv.FreeBSD.org + - Valid: from Jul 29 22:01:21 2013 GMT until Dec 13 22:01:21 2040 GMT + - Issuer: clusteradm, FreeBSD.org, (null), CA, US (clusteradm@FreeBSD.org) + - Fingerprint: 1C:BD:85:95:11:9F:EB:75:A5:4B:C8:A3:FE:08:E4:02:73:06:1E:61 +(R)eject, accept (t)emporarily or accept (p)ermanently? + + Compare the fingerprint shown to those listed in the table + above. If the fingerprint matches, the server security + certificate can be accepted temporarily or permanently. A + temporary certificate will expire after a single session with + the server, and the verification step will be repeated on the + next connection. Accepting the certificate permanently will + store the authentication credentials in + ~/.subversion/auth/ and the user will not + be asked to verify the fingerprint again until the certificate + expires. + + If https cannot be used due to firewall + or other problems, svn is the next choice, + with slightly faster transfers. When neither can be used, use + http. + - - RELENG_2_2_0_RELEASE + + For More Information - - FreeBSD-2.2.0 - - - + For other information about using + Subversion, please see the + Subversion Book, titled + Version + Control with Subversion, or the Subversion + Documentation. - - AFS Sites + + Using <application>rsync</application> - AFS servers for FreeBSD are running at the following sites: + The following sites make &os; available through the rsync + protocol. The rsync utility works in + much the same way as the &man.rcp.1; command, but has more + options and uses the rsync remote-update protocol which + transfers only the differences between two sets of files, thus + greatly speeding up the synchronization over the network. This + is most useful if you are a mirror site for the &os; + FTP server, or the CVS repository. The + rsync suite is available for many + operating systems, on &os;, see the net/rsync + port or use the package. 
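    As an illustrative sketch only (the module name
    FreeBSD and the local target directory are
    assumptions that depend on the server chosen from the list below
    and on local preference), a mirror synchronization typically uses
    archive mode together with deletion of files that have
    disappeared upstream:

    &prompt.root; rsync -avz --delete rsync://ftp.nl.FreeBSD.org/FreeBSD/ /usr/local/mirror/FreeBSD/

    The archive option preserves permissions and timestamps, while
    --delete removes local files that no longer
    exist on the server, which is usually the desired behaviour for a
    mirror.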
- Sweden + Czech Republic - The path to the files are: - /afs/stacken.kth.se/ftp/pub/FreeBSD/ + rsync://ftp.cz.FreeBSD.org/ - stacken.kth.se # Stacken Computer Club, KTH, Sweden -130.237.234.43 #hot.stacken.kth.se -130.237.237.230 #fishburger.stacken.kth.se -130.237.234.3 #milko.stacken.kth.se + Available collections: + + + ftp: A partial mirror of the &os; + FTP server. + - Maintainer ftp@stacken.kth.se + + &os;: A full mirror of the &os; + FTP server. + + - - - - rsync Sites + + Netherlands - The following sites make FreeBSD available through the rsync - protocol. The rsync utility works in - much the same way as the &man.rcp.1; command, - but has more options and uses the rsync remote-update protocol - which transfers only the differences between two sets of files, - thus greatly speeding up the synchronization over the network. - This is most useful if you are a mirror site for the - FreeBSD FTP server, or the CVS repository. The - rsync suite is available for many - operating systems, on FreeBSD, see the - net/rsync - port or use the package. + + rsync://ftp.nl.FreeBSD.org/ + + Available collections: + + + &os;: A full mirror of the &os; + FTP server. + + + + - - Czech Republic + Russia - rsync://ftp.cz.FreeBSD.org/ + rsync://ftp.mtu.ru/ Available collections: + - ftp: A partial mirror of the FreeBSD FTP - server. - FreeBSD: A full mirror of the FreeBSD FTP - server. + + &os;: A full mirror of the &os; + FTP server. + + + + &os;-Archive: The mirror of &os; Archive + FTP server. + - Germany + Sweden - rsync://grappa.unix-ag.uni-kl.de/ + rsync://ftp4.se.freebsd.org/ Available collections: - freebsd-cvs: The full FreeBSD CVS - repository. + + &os;: A full mirror of the &os; + FTP server. + - This machine also mirrors the CVS repositories of the - NetBSD and the OpenBSD projects, among others. - Netherlands + Taiwan - rsync://ftp.nl.FreeBSD.org/ + rsync://ftp.tw.FreeBSD.org/ + + rsync://ftp2.tw.FreeBSD.org/ + + rsync://ftp6.tw.FreeBSD.org/ Available collections: - vol/4/freebsd-core: A full mirror of the - FreeBSD FTP server. + + &os;: A full mirror of the &os; + FTP server. + @@ -3150,12 +885,14 @@ United Kingdom - rsync://rsync.mirror.ac.uk/ + rsync://rsync.mirrorservice.org/ Available collections: - ftp.FreeBSD.org: A full mirror of the - FreeBSD FTP server. + + ftp.freebsd.org: A full mirror of the &os; + FTP server. + @@ -3166,22 +903,31 @@ rsync://ftp-master.FreeBSD.org/ - This server may only be used by FreeBSD primary mirror + This server may only be used by &os; primary mirror sites. + Available collections: + - FreeBSD: The master archive of the FreeBSD - FTP server. - acl: The FreeBSD master ACL - list. + + &os;: The master archive of the &os; + FTP server. + + + + acl: The &os; master ACL list. + rsync://ftp13.FreeBSD.org/ Available collections: + - FreeBSD: A full mirror of the FreeBSD FTP - server. + + &os;: A full mirror of the &os; + FTP server. + Index: zh_TW.UTF-8/books/handbook/network-servers/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/network-servers/chapter.xml +++ zh_TW.UTF-8/books/handbook/network-servers/chapter.xml @@ -1,61 +1,79 @@ - - Network Servers + + - + 網路伺服器 概述 - This chapter will cover some of the more frequently used - network services on &unix; systems. We will cover how to - install, configure, test, and maintain many different types of + This chapter covers some of the more frequently used network + services on &unix; systems. 
This includes installing, + configuring, testing, and maintaining many different types of network services. Example configuration files are included - throughout this chapter for you to benefit from. + throughout this chapter for reference. - After reading this chapter, you will know: + By the end of this chapter, readers will know: - How to manage the inetd daemon. - How to set up a network file system. + How to set up the Network File System + (NFS). - How to set up a network information server for sharing + How to set up the Network Information Server + (NIS) for centralizing and sharing user accounts. - How to set up automatic network settings using DHCP. + How to set &os; up to act as an LDAP + server or client + + + + How to set up automatic network settings using + DHCP. - How to set up a domain name server. + How to set up a Domain Name Server + (DNS). - How to set up the Apache HTTP Server. + How to set up the Apache + HTTP Server. - How to set up a File Transfer Protocol (FTP) Server. + How to set up a File Transfer Protocol + (FTP) server. @@ -65,268 +83,150 @@ How to synchronize the time and date, and set up a - time server, with the NTP protocol. + time server using the Network Time Protocol + (NTP). + + How to set up iSCSI. + - Before reading this chapter, you should: + This chapter assumes a basic knowledge of: - Understand the basics of the - /etc/rc scripts. + /etc/rc scripts. - Be familiar with basic network terminology. + Network terminology. - Know how to install additional third-party - software (). + Installation of additional third-party + software (). - - The <application>inetd</application> <quote>Super-Server</quote> + The <application>inetd</application> + Super-Server + + - - An external service is a daemon outside of - inetd, which is invoked when a - connection is received for it. On the other hand, an - internal service is one that - inetd has the facility of - offering within itself. - + The &man.inetd.8; daemon is sometimes referred to as a + Super-Server because it manages connections for many services. + Instead of starting multiple applications, only the + inetd service needs to be started. + When a connection is received for a service that is managed by + inetd, it determines which program + the connection is destined for, spawns a process for that + program, and delegates the program a socket. Using + inetd for services that are not + heavily used can reduce system load, when compared to running + each daemon individually in stand-alone mode. + + Primarily, inetd is used to + spawn other daemons, but several trivial protocols are handled + internally, such as chargen, + auth, + time, + echo, + discard, and + daytime. - + This section covers the basics of configuring + inetd. - <filename>inetd.conf</filename> + Configuration File Configuration of inetd is - controlled through the /etc/inetd.conf - file. + done by editing /etc/inetd.conf. Each + line of this configuration file represents an application + which can be started by inetd. By + default, every line starts with a comment + (#), meaning that + inetd is not listening for any + applications. To configure inetd + to listen for an application's connections, remove the + # at the beginning of the line for that + application. 
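+ For instance, the entry for &man.ftpd.8;, whose fields are
+ described in more detail below, ships commented out. Enabling
+ it is just a matter of removing the leading
+ #, changing:
+
+ #ftp    stream  tcp     nowait  root    /usr/libexec/ftpd       ftpd -l
+
+ to:
+
+ ftp     stream  tcp     nowait  root    /usr/libexec/ftpd       ftpd -l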
+ + After saving your edits, configure + inetd to start at system boot by + editing /etc/rc.conf: + + inetd_enable="YES" + + To start inetd now, so that it + listens for the service you configured, type: + + &prompt.root; service inetd start + + Once inetd is started, it needs + to be notified whenever a modification is made to + /etc/inetd.conf: + + + Reloading the <application>inetd</application> + Configuration File + + &prompt.root; service inetd reload + - When a modification is made to - /etc/inetd.conf, - inetd can be forced to re-read its - configuration file by sending a HangUP signal to the - inetd process as shown: + Typically, the default entry for an application does not + need to be edited beyond removing the #. + In some situations, it may be appropriate to edit the default + entry. - - Sending <application>inetd</application> a HangUP Signal + As an example, this is the default entry for &man.ftpd.8; + over IPv4: - &prompt.root; kill -HUP `cat /var/run/inetd.pid` - + ftp stream tcp nowait root /usr/libexec/ftpd ftpd -l - Each line of the configuration file specifies an - individual daemon. Comments in the file are preceded by a - #. The format of - /etc/inetd.conf is as follows: + The seven columns in an entry are as follows: service-name socket-type protocol -{wait|nowait}[/max-child[/max-connections-per-ip-per-minute]] +{wait|nowait}[/max-child[/max-connections-per-ip-per-minute[/max-child-per-ip]]] user[:group][/login-class] server-program server-program-arguments - An example entry for the ftpd daemon - using IPv4: - - ftp stream tcp nowait root /usr/libexec/ftpd ftpd -l + where: service-name - This is the service name of the particular daemon. - It must correspond to a service listed in + The service name of the daemon to start. It must + correspond to a service listed in /etc/services. This determines - which port inetd must listen - to. If a new service is being created, it must be - placed in /etc/services - first. + which port inetd listens on + for incoming connections to that service. When using a + custom service, it must first be added to + /etc/services. @@ -336,10 +236,10 @@ Either stream, dgram, raw, or - seqpacket. stream - must be used for connection-based, TCP daemons, while - dgram is used for daemons utilizing - the UDP transport protocol. + seqpacket. Use + stream for TCP connections and + dgram for + UDP services. @@ -347,40 +247,47 @@ protocol - One of the following: + Use one of the following protocol names: - Protocol + Protocol Name Explanation + - tcp, tcp4 + tcp or tcp4 TCP IPv4 + - udp, udp4 - UDP IPv4 + udp or udp4 + UDP IPv4 + tcp6 TCP IPv6 + udp6 - UDP IPv6 + UDP IPv6 + tcp46 - Both TCP IPv4 and v6 + Both TCP IPv4 and IPv6 + udp46 - Both UDP IPv4 and v6 + Both UDP IPv4 and + IPv6 @@ -389,61 +296,51 @@ - {wait|nowait}[/max-child[/max-connections-per-ip-per-minute]] + {wait|nowait}[/max-child[/max-connections-per-ip-per-minute[/max-child-per-ip]]] - indicates whether the - daemon invoked from inetd is - able to handle its own socket or not. - socket types must use the - option, while stream socket - daemons, which are usually multi-threaded, should use - . usually - hands off multiple sockets to a single daemon, while - spawns a child daemon for each - new socket. + In this field, or + must be specified. + , + and + are optional. + + indicates whether or + not the service is able to handle its own socket. + socket types must use + while + daemons, which are usually + multi-threaded, should use . 
+ usually hands off multiple sockets + to a single daemon, while spawns + a child daemon for each new socket. The maximum number of child daemons - inetd may spawn can be set - using the option. If a limit - of ten instances of a particular daemon is needed, a - /10 would be placed after - . - - In addition to , another - option limiting the maximum connections from a single - place to a particular daemon can be enabled. - does - just this. A value of ten here would limit any particular - IP address connecting to a particular service to ten - attempts per minute. This is useful to prevent - intentional or unintentional resource consumption and - Denial of Service (DoS) attacks to a machine. + inetd may spawn is set by + . For example, to limit ten + instances of the daemon, place a /10 + after . Specifying + /0 allows an unlimited number of + children. + + + limits the number of connections from any particular + IP address per minute. Once the + limit is reached, further connections from this IP + address will be dropped until the end of the minute. + For example, a value of /10 would + limit any particular IP address to + ten connection attempts per minute. + limits the number of + child processes that can be started on behalf on any + single IP address at any moment. + These options can limit excessive resource consumption + and help to prevent Denial of Service attacks. - In this field, or - is mandatory. - and - are - optional. - - A stream-type multi-threaded daemon without any - or - limits - would simply be: nowait. - - The same daemon with a maximum limit of ten daemons - would read: nowait/10. - - Additionally, the same setup with a limit of twenty - connections per IP address per minute and a maximum - total limit of ten child daemons would read: - nowait/10/20. - - These options are all utilized by the default - settings of the fingerd daemon, - as seen here: + An example can be seen in the default settings for + &man.fingerd.8;: - finger stream tcp nowait/3/10 nobody /usr/libexec/fingerd fingerd -s + finger stream tcp nowait/3/10 nobody /usr/libexec/fingerd fingerd -k -s @@ -451,12 +348,11 @@ user - This is the username that the particular daemon - should run as. Most commonly, daemons run as the - root user. For security purposes, it is - common to find some servers running as the - daemon user, or the least privileged - nobody user. + The username the daemon + will run as. Daemons typically run as + root, + daemon, or + nobody. @@ -464,11 +360,9 @@ server-program - The full path of the daemon to be executed when a - connection is received. If the daemon is a service - provided by inetd internally, - then should be - used. + The full path to the daemon. If the daemon is a + service provided by inetd + internally, use . @@ -476,150 +370,207 @@ server-program-arguments - This works in conjunction with - by specifying the - arguments, starting with argv[0], - passed to the daemon on invocation. If - mydaemon -d is the command line, - mydaemon -d would be the value of - . Again, if - the daemon is an internal service, use - here. + Used to specify any command arguments to be passed + to the daemon on invocation. If the daemon is an + internal service, use + . - - Security + + Command-Line Options - Depending on the security profile chosen at install, many - of inetd's daemons may be enabled - by default. If there is no apparent need for a particular - daemon, disable it! Place a # in front of the - daemon in question in /etc/inetd.conf, - and then send a hangup - signal to inetd. 
Some daemons, such as - fingerd, may not be desired at all - because they provide an attacker with too much - information. + Like most server daemons, inetd + has a number of options that can be used to modify its + behaviour. By default, inetd is + started with -wW -C 60. These options + enable TCP wrappers for all services, including internal + services, and prevent any IP address from + requesting any service more than 60 times per minute. + + To change the default options which are passed to + inetd, add an entry for + inetd_flags in + /etc/rc.conf. If + inetd is already running, restart + it with service inetd restart. - Some daemons are not security-conscious and have long, or - non-existent timeouts for connection attempts. This allows an - attacker to slowly send connections to a particular daemon, - thus saturating available resources. It may be a good idea to - place and - limitations on certain - daemons. - - By default, TCP wrapping is turned on. Consult the - &man.hosts.access.5; manual page for more information on placing - TCP restrictions on various inetd - invoked daemons. - + The available rate limiting options are: + + + + -c maximum - - Miscellaneous + + Specify the default maximum number of simultaneous + invocations of each service, where the default is + unlimited. May be overridden on a per-service basis by + using in + /etc/inetd.conf. + + - daytime, - time, - echo, - discard, - chargen, and - auth are all internally provided - services of inetd. - - The auth service provides - identity (ident, - identd) network services, and is - configurable to a certain degree. + + -C rate - Consult the &man.inetd.8; manual page for more in-depth - information. + + Specify the default maximum number of times a + service can be invoked from a single + IP address per minute. May be + overridden on a per-service basis by using + in + /etc/inetd.conf. + + + + + -R rate + + + Specify the maximum number of times a service can be + invoked in one minute, where the default is + 256. A rate of 0 + allows an unlimited number. + + + + + -s maximum + + + Specify the maximum number of times a service can be + invoked from a single IP address at + any one time, where the default is unlimited. May be + overridden on a per-service basis by using + in + /etc/inetd.conf. + + + + + Additional options are available. Refer to &man.inetd.8; + for the full list of options. + + + + Security Considerations + + Many of the daemons which can be managed by + inetd are not security-conscious. + Some daemons, such as fingerd, can + provide information that may be useful to an attacker. Only + enable the services which are needed and monitor the system + for excessive connection attempts. + max-connections-per-ip-per-minute, + max-child and + max-child-per-ip can be used to limit such + attacks. + + By default, TCP wrappers is enabled. Consult + &man.hosts.access.5; for more information on placing TCP + restrictions on various + inetd invoked daemons. - Network File System (NFS) + + Network File System (NFS) + - TomRhodesReorganized and enhanced by + + + Tom + Rhodes + + Reorganized and enhanced by + - BillSwingleWritten by + + + Bill + Swingle + + Written by + - NFS - Among the many different file systems that FreeBSD supports - is the Network File System, also known as NFS. NFS allows a system to share directories and - files with others over a network. By using NFS, users and programs can - access files on remote systems almost as if they were local - files. 
+ &os; supports the Network File System + (NFS), which allows a server to share + directories and files with clients over a network. With + NFS, users and programs can access files on + remote systems as if they were stored locally. - Some of the most notable benefits that - NFS can provide are: + NFS has many practical uses. Some of + the more common uses include: - Local workstations use less disk space because commonly - used data can be stored on a single machine and still remain - accessible to others over the network. + Data that would otherwise be duplicated on each client + can be kept in a single location and accessed by clients + on the network. - There is no need for users to have separate home - directories on every network machine. Home directories - could be set up on the NFS server and - made available throughout the network. + Several clients may need access to the + /usr/ports/distfiles directory. + Sharing that directory allows for quick access to the + source files without having to download them to each + client. - Storage devices such as floppy disks, CDROM drives, and - &iomegazip; drives can be used by other machines on the network. - This may reduce the number of removable media drives - throughout the network. + On large networks, it is often more convenient to + configure a central NFS server on which + all user home directories are stored. Users can log into + a client anywhere on the network and have access to their + home directories. - - - How <acronym>NFS</acronym> Works + + Administration of NFS exports is + simplified. For example, there is only one file system + where security or backup policies must be set. + + + + Removable media storage devices can be used by other + machines on the network. This reduces the number of devices + throughout the network and provides a centralized location + to manage their security. It is often more convenient to + install software on multiple machines from a centralized + installation media. + + - NFS consists of at least two main - parts: a server and one or more clients. The client remotely - accesses the data that is stored on the server machine. In - order for this to function properly a few processes have to be - configured and running. - - Under &os; 4.X, the portmap - utility is used in place of the - rpcbind utility. Thus, in &os; 4.X - the user is required to replace every instance of - rpcbind with - portmap in the forthcoming - examples. + NFS consists of a server and one or more + clients. The client remotely accesses the data that is stored + on the server machine. In order for this to function properly, + a few processes have to be configured and running. - The server has to be running the following daemons: - - NFS - server - - - file server - UNIX clients + These daemons must be running on the server: + + NFS + server + + + file server + UNIX clients rpcbind - portmap - - - mountd + mountd - nfsd + nfsd @@ -633,151 +584,119 @@ Description + nfsd The NFS daemon which services - requests from the NFS - clients. + requests from NFS clients. + mountd - The NFS mount daemon which carries out - the requests that &man.nfsd.8; passes on to it. + The NFS mount daemon which + carries out requests received from + nfsd. + rpcbind - This daemon allows - NFS clients to discover which port - the NFS server is using. + This daemon allows NFS + clients to discover which port the + NFS server is using. - The client can also run a daemon, known as - nfsiod. The - nfsiod daemon services the requests - from the NFS server. 
This is optional, and - improves performance, but is not required for normal and - correct operation. See the &man.nfsiod.8; manual page for - more information. - - - - - Configuring <acronym>NFS</acronym> - - NFS - configuration - - - NFS configuration is a relatively - straightforward process. The processes that need to be - running can all start at boot time with a few modifications to - your /etc/rc.conf file. - - On the NFS server, make sure that the - following options are configured in the - /etc/rc.conf file: + Running &man.nfsiod.8; on the client can improve + performance, but is not required. - rpcbind_enable="YES" -nfs_server_enable="YES" -mountd_flags="-r" - - mountd runs automatically - whenever the NFS server is enabled. + + Configuring the Server - On the client, make sure this option is present in - /etc/rc.conf: - - nfs_client_enable="YES" - - The /etc/exports file specifies which - file systems NFS should export (sometimes - referred to as share). Each line in - /etc/exports specifies a file system to be - exported and which machines have access to that file system. - Along with what machines have access to that file system, - access options may also be specified. There are many such - options that can be used in this file but only a few will be - mentioned here. You can easily discover other options by - reading over the &man.exports.5; manual page. + + NFS + configuration + - Here are a few example /etc/exports - entries: + The file systems which the NFS server + will share are specified in /etc/exports. + Each line in this file specifies a file system to be exported, + which clients have access to that file system, and any access + options. When adding entries to this file, each exported file + system, its properties, and allowed hosts must occur on a + single line. If no clients are listed in the entry, then any + client on the network can mount that file system. - NFS - export examples + NFS + export examples - The following examples give an idea of how to export - file systems, although the settings may be different depending - on your environment and network configuration. For instance, - to export the /cdrom directory to three - example machines that have the same domain name as the server - (hence the lack of a domain name for each) or have entries in - your /etc/hosts file. The - flag makes the exported file system - read-only. With this flag, the remote system will not be able - to write any changes to the exported file system. - - /cdrom -ro host1 host2 host3 - - The following line exports /home to - three hosts by IP address. This is a useful setup if you have - a private network without a DNS server - configured. Optionally the /etc/hosts - file could be configured for internal hostnames; please review - &man.hosts.5; for more information. The - flag allows the subdirectories to be - mount points. In other words, it will not mount the - subdirectories but permit the client to mount only the - directories that are required or needed. + The following /etc/exports entries + demonstrate how to export file systems. The examples can be + modified to match the file systems and client names on the + reader's network. There are many options that can be used in + this file, but only a few will be mentioned here. See + &man.exports.5; for the full list of options. 
+ + This example shows how to export + /cdrom to three hosts named + alpha, + bravo, and + charlie: + + /cdrom -ro alpha bravo charlie + + The -ro flag makes the file system + read-only, preventing clients from making any changes to the + exported file system. This example assumes that the host + names are either in DNS or in + /etc/hosts. Refer to &man.hosts.5; if + the network does not have a DNS + server. + + The next example exports /home to + three clients by IP address. This can be + useful for networks without DNS or + /etc/hosts entries. The + -alldirs flag allows subdirectories to be + mount points. In other words, it will not automatically mount + the subdirectories, but will permit the client to mount the + directories that are required as needed. /home -alldirs 10.0.0.2 10.0.0.3 10.0.0.4 - The following line exports /a so that - two clients from different domains may access the file system. - The flag allows the - root user on the remote system to write - data on the exported file system as root. - If the -maproot=root flag is not specified, - then even if a user has root access on - the remote system, he will not be able to modify files on - the exported file system. + This next example exports /a so that + two clients from different domains may access that file + system. The allows root on the remote system to + write data on the exported file system as root. If + -maproot=root is not specified, the + client's root user + will be mapped to the server's nobody account and will be + subject to the access limitations defined for nobody. /a -maproot=root host.example.com box.example.org - In order for a client to access an exported file system, - the client must have permission to do so. Make sure the - client is listed in your /etc/exports - file. - - In /etc/exports, each line represents - the export information for one file system to one host. A - remote host can only be specified once per file system, and may - only have one default entry. For example, assume that - /usr is a single file system. The - following /etc/exports would be - invalid: + A client can only be specified once per file system. For + example, if /usr is a single file system, + these entries would be invalid as both entries specify the + same host: # Invalid when /usr is one file system /usr/src client /usr/ports client - One file system, /usr, has two lines - specifying exports to the same host, client. - The correct format for this situation is: + The correct format for this situation is to use one + entry: /usr/src /usr/ports client - The properties of one file system exported to a given host - must all occur on one line. Lines without a client specified - are treated as a single host. This limits how you can export - file systems, but for most people this is not an issue. - The following is an example of a valid export list, where /usr and /exports are local file systems: @@ -791,140 +710,156 @@ /exports -alldirs -maproot=root client01 client02 /exports/obj -ro - You must restart - mountd whenever you modify - /etc/exports so the changes can take effect. - This can be accomplished by sending the HUP signal - to the mountd process: - - &prompt.root; kill -HUP `cat /var/run/mountd.pid` - - Alternatively, a reboot will make FreeBSD set everything - up properly. A reboot is not necessary though. - Executing the following commands as root - should start everything up. 
- - On the NFS server: - - &prompt.root; rpcbind -&prompt.root; nfsd -u -t -n 4 -&prompt.root; mountd -r - - On the NFS client: - - &prompt.root; nfsiod -n 4 - - Now everything should be ready to actually mount a remote file - system. In these examples the - server's name will be server and the client's - name will be client. If you only want to - temporarily mount a remote file system or would rather test the - configuration, just execute a command like this as root on the - client: + To enable the processes required by the + NFS server at boot time, add these options + to /etc/rc.conf: + + rpcbind_enable="YES" +nfs_server_enable="YES" +mountd_flags="-r" + + The server can be started now by running this + command: + + &prompt.root; service nfsd start + + Whenever the NFS server is started, + mountd also starts automatically. + However, mountd only reads + /etc/exports when it is started. To make + subsequent /etc/exports edits take effect + immediately, force mountd to reread + it: + + &prompt.root; service mountd reload + + + + Configuring the Client + + To enable NFS clients, set this option + in each client's /etc/rc.conf: + + nfs_client_enable="YES" + + Then, run this command on each NFS + client: + + &prompt.root; service nfsclient start + + The client now has everything it needs to mount a remote + file system. In these examples, the server's name is + server and the client's name is + client. To mount + /home on + server to the + /mnt mount point on + client: + - NFS - mounting + NFS + mounting &prompt.root; mount server:/home /mnt - This will mount the /home directory - on the server at /mnt on the client. If - everything is set up correctly you should be able to enter - /mnt on the client and see all the files - that are on the server. - - If you want to automatically mount a remote file system - each time the computer boots, add the file system to the - /etc/fstab file. Here is an example: + The files and directories in + /home will now be available on + client, in the + /mnt directory. + + To mount a remote file system each time the client boots, + add it to /etc/fstab: server:/home /mnt nfs rw 0 0 - The &man.fstab.5; manual page lists all the available - options. + Refer to &man.fstab.5; for a description of all available + options. - Practical Uses + Locking - NFS has many practical uses. Some of - the more common ones are listed below: + Some applications require file locking to operate + correctly. To enable locking, add these lines to + /etc/rc.conf on both the client and + server: - - NFS - uses - - - - Set several machines to share a CDROM or other media - among them. This is cheaper and often a more convenient - method to install software on multiple machines. - + rpc_lockd_enable="YES" +rpc_statd_enable="YES" - - On large networks, it might be more convenient to - configure a central NFS server in which - to store all the user home directories. These home - directories can then be exported to the network so that - users would always have the same home directory, - regardless of which workstation they log in to. - + Then start the applications: - - Several machines could have a common - /usr/ports/distfiles directory. That - way, when you need to install a port on several machines, - you can quickly access the source without downloading it - on each machine. - - + &prompt.root; service lockd start +&prompt.root; service statd start + + If locking is not required on the server, the + NFS client can be configured to lock + locally by including when running + mount. 
Refer to &man.mount.nfs.8; + for further details. - Automatic Mounts with <application>amd</application> + + Automating Mounts with &man.amd.8; + - WylieStilwellContributed by + + + Wylie + Stilwell + + Contributed by + - ChernLeeRewritten by + + + Chern + Lee + + Rewritten by + - amd - automatic mounter daemon + + automatic mounter daemon + - &man.amd.8; (the automatic mounter daemon) - automatically mounts a - remote file system whenever a file or directory within that - file system is accessed. Filesystems that are inactive for a - period of time will also be automatically unmounted by - amd. Using - amd provides a simple alternative - to permanent mounts, as permanent mounts are usually listed in - /etc/fstab. - - amd operates by attaching - itself as an NFS server to the /host and - /net directories. When a file is accessed - within one of these directories, amd - looks up the corresponding remote mount and automatically mounts - it. /net is used to mount an exported - file system from an IP address, while /host - is used to mount an export from a remote hostname. - - An access to a file within - /host/foobar/usr would tell - amd to attempt to mount the + The automatic mounter daemon, + amd, automatically mounts a remote + file system whenever a file or directory within that file + system is accessed. File systems that are inactive for a + period of time will be automatically unmounted by + amd. + + This daemon provides an alternative to modifying + /etc/fstab to list every client. It + operates by attaching itself as an NFS + server to the /host and + /net directories. When a file is + accessed within one of these directories, + amd looks up the corresponding + remote mount and automatically mounts it. + /net is used to mount an exported file + system from an IP address while + /host is used to mount an export from a + remote hostname. For instance, an attempt to access a file + within /host/foobar/usr would tell + amd to mount the /usr export on the host foobar. - Mounting an Export with <application>amd</application> + Mounting an Export with + <application>amd</application> - You can view the available mounts of a remote host with - the showmount command. For example, to - view the mounts of a host named foobar, you - can use: + In this example, showmount -e shows + the exported file systems that can be mounted from the + NFS server, + foobar: &prompt.user; showmount -e foobar Exports list on foobar: @@ -933,212 +868,215 @@ &prompt.user; cd /host/foobar/usr - As seen in the example, the showmount shows - /usr as an export. When changing directories to - /host/foobar/usr, amd - attempts to resolve the hostname foobar and - automatically mount the desired export. + The output from showmount shows + /usr as an export. When changing + directories to /host/foobar/usr, + amd intercepts the request and + attempts to resolve the hostname + foobar. If successful, + amd automatically mounts the + desired export. - amd can be started by the - startup scripts by placing the following lines in - /etc/rc.conf: + To enable amd at boot time, add + this line to /etc/rc.conf: amd_enable="YES" - Additionally, custom flags can be passed to - amd from the - amd_flags option. By default, - amd_flags is set to: + To start amd now: + + &prompt.root; service amd start + + Custom flags can be passed to + amd from the + amd_flags environment variable. 
By + default, amd_flags is set to: amd_flags="-a /.amd_mnt -l syslog /host /etc/amd.map /net /etc/amd.map" - The /etc/amd.map file defines the - default options that exports are mounted with. The - /etc/amd.conf file defines some of the more - advanced features of amd. + The default options with which exports are mounted are + defined in /etc/amd.map. Some of the + more advanced features of amd are + defined in /etc/amd.conf. - Consult the &man.amd.8; and &man.amd.conf.5; manual pages for more + Consult &man.amd.8; and &man.amd.conf.5; for more information. - - Problems Integrating with Other Systems - - JohnLindContributed by - - - + + Automating Mounts with &man.autofs.5; + + + The &man.autofs.5; automount facility is supported + starting with &os; 10.1-RELEASE. To use the + automounter functionality in older versions of &os;, use + &man.amd.8; instead. This chapter only describes the + &man.autofs.5; automounter. + + + + autofs + + automounter subsystem + + + The &man.autofs.5; facility is a common name for several + components that, together, allow for automatic mounting of + remote and local filesystems whenever a file or directory + within that file system is accessed. It consists of the + kernel component, &man.autofs.5;, and several userspace + applications: &man.automount.8;, &man.automountd.8; and + &man.autounmountd.8;. It serves as an alternative for + &man.amd.8; from previous &os; releases. Amd is still + provided for backward compatibility purposes, as the two use + different map format; the one used by autofs is the same as + with other SVR4 automounters, such as the ones in Solaris, + MacOS X, and Linux. + + The &man.autofs.5; virtual filesystem is mounted on + specified mountpoints by &man.automount.8;, usually invoked + during boot. + + Whenever a process attempts to access file within the + &man.autofs.5; mountpoint, the kernel will notify + &man.automountd.8; daemon and pause the triggering process. + The &man.automountd.8; daemon will handle kernel requests by + finding the proper map and mounting the filesystem according + to it, then signal the kernel to release blocked process. The + &man.autounmountd.8; daemon automatically unmounts automounted + filesystems after some time, unless they are still being + used. + + The primary autofs configuration file is + /etc/auto_master. It assigns individual + maps to top-level mounts. For an explanation of + auto_master and the map syntax, refer to + &man.auto.master.5;. + + There is a special automounter map mounted on + /net. When a file is accessed within + this directory, &man.autofs.5; looks up the corresponding + remote mount and automatically mounts it. For instance, an + attempt to access a file within + /net/foobar/usr would tell + &man.automountd.8; to mount the /usr export from the host + foobar. + + + Mounting an Export with &man.autofs.5; - Certain Ethernet adapters for ISA PC systems have limitations - which can lead to serious network problems, particularly with NFS. - This difficulty is not specific to FreeBSD, but FreeBSD systems - are affected by it. - - The problem nearly always occurs when (FreeBSD) PC systems are - networked with high-performance workstations, such as those made - by Silicon Graphics, Inc., and Sun Microsystems, Inc. The NFS - mount will work fine, and some operations may succeed, but - suddenly the server will seem to become unresponsive to the - client, even though requests to and from other systems continue to - be processed. 
This happens to the client system, whether the - client is the FreeBSD system or the workstation. On many systems, - there is no way to shut down the client gracefully once this - problem has manifested itself. The only solution is often to - reset the client, because the NFS situation cannot be - resolved. - - Though the correct solution is to get a - higher performance and capacity Ethernet adapter for the - FreeBSD system, there is a simple workaround that will allow - satisfactory operation. If the FreeBSD system is the - server, include the option - on the mount from the client. If the - FreeBSD system is the client, then mount - the NFS file system with the option . - These options may be specified using the fourth field of the - fstab entry on the client for automatic - mounts, or by using the parameter of the - &man.mount.8; command for manual mounts. - - It should be noted that there is a different problem, - sometimes mistaken for this one, when the NFS servers and - clients are on different networks. If that is the case, make - certain that your routers are routing the - necessary UDP information, or you will not get anywhere, no - matter what else you are doing. - - In the following examples, fastws is the host - (interface) name of a high-performance workstation, and - freebox is the host (interface) name of a FreeBSD - system with a lower-performance Ethernet adapter. Also, - /sharedfs will be the exported NFS - file system (see &man.exports.5;), and - /project will be the mount point on the - client for the exported file system. In all cases, note that - additional options, such as or - and may be desirable in - your application. - - Examples for the FreeBSD system (freebox) - as the client in /etc/fstab on - freebox: - - fastws:/sharedfs /project nfs rw,-r=1024 0 0 - - As a manual mount command on freebox: - - &prompt.root; mount -t nfs -o -r=1024 fastws:/sharedfs /project - - Examples for the FreeBSD system as the server in - /etc/fstab on - fastws: - - freebox:/sharedfs /project nfs rw,-w=1024 0 0 - - As a manual mount command on fastws: - - &prompt.root; mount -t nfs -o -w=1024 freebox:/sharedfs /project - - Nearly any 16-bit Ethernet adapter will allow operation - without the above restrictions on the read or write size. - - For anyone who cares, here is what happens when the - failure occurs, which also explains why it is unrecoverable. - NFS typically works with a block size of - 8 K (though it may do fragments of smaller sizes). Since - the maximum Ethernet packet is around 1500 bytes, the NFS - block gets split into multiple Ethernet - packets, even though it is still a single unit to the - upper-level code, and must be received, assembled, and - acknowledged as a unit. The - high-performance workstations can pump out the packets which - comprise the NFS unit one right after the other, just as close - together as the standard allows. On the smaller, lower - capacity cards, the later packets overrun the earlier packets - of the same unit before they can be transferred to the host - and the unit as a whole cannot be reconstructed or - acknowledged. As a result, the workstation will time out and - try again, but it will try again with the entire 8 K - unit, and the process will be repeated, ad infinitum. - - By keeping the unit size below the Ethernet packet size - limitation, we ensure that any complete Ethernet packet - received can be acknowledged individually, avoiding the - deadlock situation. 
- - Overruns may still occur when a high-performance - workstations is slamming data out to a PC system, but with the - better cards, such overruns are not guaranteed on NFS - units. When an overrun occurs, the units - affected will be retransmitted, and there will be a fair - chance that they will be received, assembled, and - acknowledged. + In this example, showmount -e shows + the exported file systems that can be mounted from the + NFS server, + foobar: + + &prompt.user; showmount -e foobar +Exports list on foobar: +/usr 10.10.10.0 +/a 10.10.10.0 +&prompt.user; cd /net/foobar/usr + + + The output from showmount shows + /usr as an export. + When changing directories to /host/foobar/usr, + &man.automountd.8; intercepts the request and attempts to + resolve the hostname foobar. If successful, + &man.automountd.8; automatically mounts the source + export. + + To enable &man.autofs.5; at boot time, add this line to + /etc/rc.conf: + + autofs_enable="YES" + + Then &man.autofs.5; can be started by running: + + &prompt.root; service automount start +&prompt.root; service automountd start +&prompt.root; service autounmountd start + + The &man.autofs.5; map format is the same as in other + operating systems, it might be desirable to consult + information from other operating systems, such as the Mac + OS X document. + + Consult the &man.automount.8;, &man.automountd.8;, + &man.autounmountd.8;, and &man.auto.master.5; manual pages for + more information. - Network Information System (NIS/YP) + + Network Information System + (<acronym>NIS</acronym>) + + NIS + Solaris + HP-UX + AIX + Linux + NetBSD + OpenBSD + + yellow pages + NIS + - - What Is It? - NIS - Solaris - HP-UX - AIX - Linux - NetBSD - OpenBSD - - NIS, - which stands for Network Information Services, was developed - by Sun Microsystems to centralize administration of &unix; - (originally &sunos;) systems. It has now essentially become - an industry standard; all major &unix; like systems - (&solaris;, HP-UX, &aix;, Linux, NetBSD, OpenBSD, FreeBSD, - etc) support NIS. - - yellow pagesNIS - - NIS - was formerly known as Yellow Pages, but because of trademark - issues, Sun changed the name. The old term (and yp) is still - often seen and used. + Network Information System (NIS) is + designed to centralize administration of &unix;-like systems + such as &solaris;, HP-UX, &aix;, Linux, NetBSD, OpenBSD, and + &os;. NIS was originally known as Yellow + Pages but the name was changed due to trademark issues. This + is the reason why NIS commands begin with + yp. - - NIS - domains + + NIS + domains - It is a RPC-based client/server system that allows a group - of machines within an NIS domain to share a common set of - configuration files. This permits a system administrator to - set up NIS client systems with only minimal configuration data - and add, remove or modify configuration data from a single - location. - - Windows NT - - It is similar to the &windowsnt; domain system; although - the internal implementation of the two are not at all similar, - the basic functionality can be compared. - + NIS is a Remote Procedure Call + (RPC)-based client/server system that allows + a group of machines within an NIS domain to + share a common set of configuration files. This permits a + system administrator to set up NIS client + systems with only minimal configuration data and to add, remove, + or modify configuration data from a single location. + + &os; uses version 2 of the NIS + protocol. 
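+ Because NIS is layered on top of
+ RPC, a quick sanity check is to ask the
+ rpcbind daemon on a server which
+ RPC services it has registered. The
+ command below is only an illustration and uses the master
+ server name from the example network described later in this
+ section:
+
+ &prompt.user; rpcinfo -p ellington
+
+ Once the server has been configured, the output should
+ include an entry for ypserv.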
- Terms/Processes You Should Know + <acronym>NIS</acronym> Terms and Processes - There are several terms and several important user - processes that you will come across when attempting to - implement NIS on FreeBSD, whether you are trying to create an - NIS server or act as an NIS client: + Table 28.1 summarizes the terms and important processes + used by NIS: rpcbind @@ -1147,10 +1085,12 @@ portmap - + + <acronym>NIS</acronym> Terminology + - - + + @@ -1158,389 +1098,382 @@ Description + - NIS domainname + NIS domain name - An NIS master server and all of its clients - (including its slave servers) have a NIS domainname. - Similar to an &windowsnt; domain name, the NIS - domainname does not have anything to do with + NIS servers and clients share + an NIS domain name. Typically, + this name does not have anything to do with DNS. - - rpcbind - Must be running in order to enable - RPC (Remote Procedure Call, a - network protocol used by NIS). If - rpcbind is not running, it - will be impossible to run an NIS server, or to act as - an NIS client (Under &os; 4.X - portmap is used in place of - rpcbind). - - ypbind + &man.rpcbind.8; - Binds an NIS client to its NIS - server. It will take the NIS domainname from the - system, and using RPC, connect to - the server. ypbind is the - core of client-server communication in an NIS - environment; if ypbind dies - on a client machine, it will not be able to access the - NIS server. + This service enables RPC and + must be running in order to run an + NIS server or act as an + NIS client. + - ypserv - Should only be running on NIS servers; this is - the NIS server process itself. If &man.ypserv.8; - dies, then the server will no longer be able to - respond to NIS requests (hopefully, there is a slave - server to take over for it). There are some - implementations of NIS (but not the FreeBSD one), that - do not try to reconnect to another server if the - server it used before dies. Often, the only thing - that helps in this case is to restart the server - process (or even the whole server) or the - ypbind process on the - client. - + &man.ypbind.8; + This service binds an NIS + client to its NIS server. It will + take the NIS domain name and use + RPC to connect to the server. It + is the core of client/server communication in an + NIS environment. If this service + is not running on a client machine, it will not be + able to access the NIS + server. + - rpc.yppasswdd - Another process that should only be running on - NIS master servers; this is a daemon that will allow NIS - clients to change their NIS passwords. If this daemon - is not running, users will have to login to the NIS - master server and change their passwords there. + &man.ypserv.8; + This is the process for the + NIS server. If this service stops + running, the server will no longer be able to respond + to NIS requests so hopefully, there + is a slave server to take over. Some non-&os; clients + will not try to reconnect using a slave server and the + ypbind process may need to + be restarted on these + clients. + + + + &man.rpc.yppasswdd.8; + This process only runs on + NIS master servers. This daemon + allows NIS clients to change their + NIS passwords. If this daemon is + not running, users will have to login to the + NIS master server and change their + passwords there. - +
-
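+ The interplay between ypbind and
+ ypserv described above can be
+ observed on any client that has already been bound to a
+ server. The commands below are only an example; the output
+ depends entirely on the local domain and here reflects the
+ hosts configured later in this chapter:
+
+ &prompt.user; ypwhich
+ellington
+&prompt.user; ypcat passwd
+
+ ypwhich prints the name of the server
+ the client is currently bound to, and
+ ypcat dumps the contents of a map, in
+ this case the shared passwd map.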
- How Does It Work? + Machine Types + + NIS + master server + + NIS + slave server + + NIS + client + + + There are three types of hosts in an + NIS environment: + + + + NIS master server + + This server acts as a central repository for host + configuration information and maintains the + authoritative copy of the files used by all of the + NIS clients. The + passwd, group, + and other various files used by NIS + clients are stored on the master server. While it is + possible for one machine to be an NIS + master server for more than one NIS + domain, this type of configuration will not be covered in + this chapter as it assumes a relatively small-scale + NIS environment. + + + + NIS slave servers + + NIS slave servers maintain copies + of the NIS master's data files in + order to provide redundancy. Slave servers also help to + balance the load of the master server as + NIS clients always attach to the + NIS server which responds + first. + + + + NIS clients - There are three types of hosts in an NIS environment: - master servers, slave servers, and clients. Servers act as a - central repository for host configuration information. Master - servers hold the authoritative copy of this information, while - slave servers mirror this information for redundancy. Clients - rely on the servers to provide this information to - them. + NIS clients authenticate + against the NIS server during log + on. + + - Information in many files can be shared in this manner. - The master.passwd, + Information in many files can be shared using + NIS. The + master.passwd, group, and hosts - files are commonly shared via NIS. Whenever a process on a - client needs information that would normally be found in these - files locally, it makes a query to the NIS server that it is - bound to instead. + files are commonly shared via NIS. + Whenever a process on a client needs information that would + normally be found in these files locally, it makes a query to + the NIS server that it is bound to + instead. + + + + Planning Considerations + + This section describes a sample NIS + environment which consists of 15 &os; machines with no + centralized point of administration. Each machine has its own + /etc/passwd and + /etc/master.passwd. These files are kept + in sync with each other only through manual intervention. + Currently, when a user is added to the lab, the process must + be repeated on all 15 machines. + + The configuration of the lab will be as follows: + + + + + + Machine name + IP address + Machine role + + + + + + ellington + 10.0.0.2 + NIS master + + + + coltrane + 10.0.0.3 + NIS slave + + + + basie + 10.0.0.4 + Faculty workstation + + + + bird + 10.0.0.5 + Client machine + + + + cli[1-11] + + 10.0.0.[6-17] + Other client machines + + + + + + If this is the first time an NIS + scheme is being developed, it should be thoroughly planned + ahead of time. Regardless of network size, several decisions + need to be made as part of the planning process. + + + Choosing a <acronym>NIS</acronym> Domain Name + + + NIS + domain name + + When a client broadcasts its requests for info, it + includes the name of the NIS domain that + it is part of. This is how multiple servers on one network + can tell which server should answer which request. Think of + the NIS domain name as the name for a + group of hosts. + + Some organizations choose to use their Internet domain + name for their NIS domain name. This is + not recommended as it can cause confusion when trying to + debug network problems. 
The NIS domain + name should be unique within the network and it is helpful + if it describes the group of machines it represents. For + example, the Art department at Acme Inc. might be in the + acme-art NIS domain. This + example will use the domain name + test-domain. + + However, some non-&os; operating systems require the + NIS domain name to be the same as the + Internet domain name. If one or more machines on the + network have this restriction, the Internet domain name + must be used as the + NIS domain name. + - Machine Types + Physical Server Requirements - - - A NIS master serverNISmaster server. This - server, analogous to a &windowsnt; primary domain - controller, maintains the files used by all of the NIS - clients. The passwd, - group, and other various files used - by the NIS clients live on the master server. - - It is possible for one machine to be an NIS - master server for more than one NIS domain. However, - this will not be covered in this introduction, which - assumes a relatively small-scale NIS - environment. - - - - NIS slave serversNISslave server. Similar to - the &windowsnt; backup domain controllers, NIS slave - servers maintain copies of the NIS master's data files. - NIS slave servers provide the redundancy, which is - needed in important environments. They also help to - balance the load of the master server: NIS Clients - always attach to the NIS server whose response they get - first, and this includes slave-server-replies. - - - - NIS clientsNISclient. NIS clients, like - most &windowsnt; workstations, authenticate against the - NIS server (or the &windowsnt; domain controller in the - &windowsnt; workstations case) to log on. - - + There are several things to keep in mind when choosing a + machine to use as a NIS server. Since + NIS clients depend upon the availability + of the server, choose a machine that is not rebooted + frequently. The NIS server should + ideally be a stand alone machine whose sole purpose is to be + an NIS server. If the network is not + heavily used, it is acceptable to put the + NIS server on a machine running other + services. However, if the NIS server + becomes unavailable, it will adversely affect all + NIS clients. - Using NIS/YP + Configuring the <acronym>NIS</acronym> Master + Server - This section will deal with setting up a sample NIS - environment. + The canonical copies of all NIS files + are stored on the master server. The databases used to store + the information are called NIS maps. In + &os;, these maps are stored in + /var/yp/[domainname] where + [domainname] is the name of the + NIS domain. Since multiple domains are + supported, it is possible to have several directories, one for + each domain. Each domain will have its own independent set of + maps. + + NIS master and slave servers handle all + NIS requests through &man.ypserv.8;. This + daemon is responsible for receiving incoming requests from + NIS clients, translating the requested + domain and map name to a path to the corresponding database + file, and transmitting data from the database back to the + client. - This section assumes that you are running - FreeBSD 3.3 or later. The instructions given here will - probably work for any version of FreeBSD - greater than 3.0, but there are no guarantees that this is - true. + NIS + server configuration + + Setting up a master NIS server can be + relatively straight forward, depending on environmental needs. 
+ Since &os; provides built-in NIS support, + it only needs to be enabled by adding the following lines to + /etc/rc.conf: + nisdomainname="test-domain" +nis_server_enable="YES" +nis_yppasswdd_enable="YES" - - Planning + + + This line sets the NIS domain name + to test-domain. + - Let us assume that you are the administrator of a small - university lab. This lab, which consists of 15 FreeBSD - machines, currently has no centralized point of - administration; each machine has its own - /etc/passwd and - /etc/master.passwd. These files are - kept in sync with each other only through manual - intervention; currently, when you add a user to the lab, you - must run adduser on all 15 machines. - Clearly, this has to change, so you have decided to convert - the lab to use NIS, using two of the machines as - servers. - - Therefore, the configuration of the lab now looks something - like: - - - - - - Machine name - IP address - Machine role - - - - - ellington - 10.0.0.2 - NIS master - - - coltrane - 10.0.0.3 - NIS slave - - - basie - 10.0.0.4 - Faculty workstation - - - bird - 10.0.0.5 - Client machine - - - cli[1-11] - 10.0.0.[6-17] - Other client machines - - - - - - If you are setting up a NIS scheme for the first time, it - is a good idea to think through how you want to go about it. No - matter what the size of your network, there are a few decisions - that need to be made. - - - Choosing a NIS Domain Name - - - NIS - domainname - - This might not be the domainname that - you are used to. It is more accurately called the - NIS domainname. When a client broadcasts - its requests for info, it includes the name of the NIS - domain that it is part of. This is how multiple servers - on one network can tell which server should answer which - request. Think of the NIS domainname as the name for a - group of hosts that are related in some way. - - Some organizations choose to use their Internet - domainname for their NIS domainname. This is not - recommended as it can cause confusion when trying to debug - network problems. The NIS domainname should be unique - within your network and it is helpful if it describes the - group of machines it represents. For example, the Art - department at Acme Inc. might be in the - acme-art NIS domain. For this example, - assume you have chosen the name - test-domain. - - SunOS - However, some operating systems (notably &sunos;) use - their NIS domain name as their Internet domain name. If one - or more machines on your network have this restriction, you - must use the Internet domain name as - your NIS domain name. - - - - Physical Server Requirements - - There are several things to keep in mind when choosing - a machine to use as a NIS server. One of the unfortunate - things about NIS is the level of dependency the clients - have on the server. If a client cannot contact the server - for its NIS domain, very often the machine becomes - unusable. The lack of user and group information causes - most systems to temporarily freeze up. With this in mind - you should make sure to choose a machine that will not be - prone to being rebooted regularly, or one that might be - used for development. The NIS server should ideally be a - stand alone machine whose sole purpose in life is to be an - NIS server. If you have a network that is not very - heavily used, it is acceptable to put the NIS server on a - machine running other services, just keep in mind that if - the NIS server becomes unavailable, it will affect - all of your NIS clients - adversely. 
- - + + This automates the start up of the + NIS server processes when the system + boots. + + + + This enables the &man.rpc.yppasswdd.8; daemon so that + users can change their NIS password + from a client machine. + + + + Care must be taken in a multi-server domain where the + server machines are also NIS clients. It + is generally a good idea to force the servers to bind to + themselves rather than allowing them to broadcast bind + requests and possibly become bound to each other. Strange + failure modes can result if one server goes down and others + are dependent upon it. Eventually, all the clients will time + out and attempt to bind to other servers, but the delay + involved can be considerable and the failure mode is still + present since the servers might bind to each other all over + again. + + A server that is also a client can be forced to bind to a + particular server by adding these additional lines to + /etc/rc.conf: + + nis_client_enable="YES" # run client stuff as well +nis_client_flags="-S NIS domain,server" + + After saving the edits, type + /etc/netstart to restart the network and + apply the values defined in /etc/rc.conf. + Before initializing the NIS maps, start + &man.ypserv.8;: + + &prompt.root; service ypserv start - NIS Servers + Initializing the <acronym>NIS</acronym> Maps - The canonical copies of all NIS information are stored - on a single machine called the NIS master server. The - databases used to store the information are called NIS maps. - In FreeBSD, these maps are stored in - /var/yp/[domainname] where - [domainname] is the name of the NIS - domain being served. A single NIS server can support - several domains at once, therefore it is possible to have - several such directories, one for each supported domain. - Each domain will have its own independent set of - maps. - - NIS master and slave servers handle all NIS requests - with the ypserv daemon. - ypserv is responsible for receiving - incoming requests from NIS clients, translating the - requested domain and map name to a path to the corresponding - database file and transmitting data from the database back - to the client. - - - Setting Up a NIS Master Server - - NIS - server configuration - - Setting up a master NIS server can be relatively - straight forward, depending on your needs. FreeBSD comes - with support for NIS out-of-the-box. All you need is to - add the following lines to - /etc/rc.conf, and FreeBSD will do the - rest for you. - - - - nisdomainname="test-domain" - This line will set the NIS domainname to - test-domain - upon network setup (e.g. after reboot). - - - nis_server_enable="YES" - This will tell FreeBSD to start up the NIS server processes - when the networking is next brought up. - - - nis_yppasswdd_enable="YES" - This will enable the rpc.yppasswdd - daemon which, as mentioned above, will allow users to - change their NIS password from a client machine. - - - - - Depending on your NIS setup, you may need to add - further entries. See the section about NIS - servers that are also NIS clients, below, for - details. - - - Now, all you have to do is to run the command - /etc/netstart as superuser. It will - set up everything for you, using the values you defined in - /etc/rc.conf. - - - - Initializing the NIS Maps - - NIS - maps - - The NIS maps are database files, - that are kept in the /var/yp - directory. They are generated from configuration files in - the /etc directory of the NIS master, - with one exception: the - /etc/master.passwd file. 
This is for - a good reason, you do not want to propagate passwords to - your root and other administrative - accounts to all the servers in the NIS domain. Therefore, - before we initialize the NIS maps, you should: + + NIS + maps + + NIS maps are generated from the + configuration files in /etc on the + NIS master, with one exception: + /etc/master.passwd. This is to prevent + the propagation of passwords to all the servers in the + NIS domain. Therefore, before the + NIS maps are initialized, configure the + primary password files: - &prompt.root; cp /etc/master.passwd /var/yp/master.passwd + &prompt.root; cp /etc/master.passwd /var/yp/master.passwd &prompt.root; cd /var/yp &prompt.root; vi master.passwd - You should remove all entries regarding system - accounts (bin, - tty, kmem, - games, etc), as well as any accounts - that you do not want to be propagated to the NIS clients - (for example root and any other UID 0 - (superuser) accounts). - - Make sure the - /var/yp/master.passwd is neither group - nor world readable (mode 600)! Use the - chmod command, if appropriate. - - Tru64 UNIX - - When you have finished, it is time to initialize the - NIS maps! FreeBSD includes a script named - ypinit to do this for you (see its - manual page for more information). Note that this script - is available on most &unix; Operating Systems, but not on - all. On Digital UNIX/Compaq Tru64 UNIX it is called - ypsetup. Because we are generating - maps for an NIS master, we are going to pass the - option to ypinit. - To generate the NIS maps, assuming you already performed - the steps above, run: + It is advisable to remove all entries for system + accounts as well as any user accounts that do not need to be + propagated to the NIS clients, such as + the root and any + other administrative accounts. + + Ensure that the + /var/yp/master.passwd is neither group + or world readable by setting its permissions to + 600. + + + After completing this task, initialize the + NIS maps. &os; includes the + &man.ypinit.8; script to do this. When generating maps + for the master server, include and + specify the NIS domain name: - ellington&prompt.root; ypinit -m test-domain + ellington&prompt.root; ypinit -m test-domain Server Type: MASTER Domain: test-domain Creating an YP server will require that you answer a few questions. Questions will all be asked at the beginning of the procedure. Do you want this procedure to quit on non-fatal errors? [y/n: n] n Ok, please remember to go back and redo manually whatever fails. -If you don't, something might not work. +If not, something might not work. At this point, we have to construct a list of this domains YP servers. rod.darktech.org is already known as master server. Please continue to add any slave servers, one per line. When you are @@ -1558,40 +1491,58 @@ NIS Map update completed. ellington has been setup as an YP master server without any errors. - ypinit should have created - /var/yp/Makefile from - /var/yp/Makefile.dist. - When created, this file assumes that you are operating - in a single server NIS environment with only FreeBSD - machines. Since test-domain has - a slave server as well, you must edit - /var/yp/Makefile: - - ellington&prompt.root; vi /var/yp/Makefile - - You should comment out the line that says - - NOPUSH = "True" - - (if it is not commented out already). - - - - Setting up a NIS Slave Server - - NIS - slave server - - Setting up an NIS slave server is even more simple than - setting up the master. 
Log on to the slave server and edit the - file /etc/rc.conf as you did before. - The only difference is that we now must use the - option when running ypinit. - The option requires the name of the NIS - master be passed to it as well, so our command line looks - like: + This will create /var/yp/Makefile + from /var/yp/Makefile.dist. By + default, this file assumes that the environment has a + single NIS server with only &os; clients. + Since test-domain has a slave server, + edit this line in /var/yp/Makefile so + that it begins with a comment + (#): + + NOPUSH = "True" + + + + Adding New Users + + Every time a new user is created, the user account must + be added to the master NIS server and the + NIS maps rebuilt. Until this occurs, the + new user will not be able to login anywhere except on the + NIS master. For example, to add the new + user jsmith to the + test-domain domain, run these commands on + the master server: + + &prompt.root; pw useradd jsmith +&prompt.root; cd /var/yp +&prompt.root; make test-domain + + The user could also be added using adduser + jsmith instead of pw useradd + smith. + + + + + Setting up a <acronym>NIS</acronym> Slave Server + + + NIS + slave server + + To set up an NIS slave server, log on + to the slave server and edit /etc/rc.conf + as for the master server. Do not generate any + NIS maps, as these already exist on the + master server. When running ypinit on the + slave server, use (for slave) instead of + (for master). This option requires the + name of the NIS master in addition to the + domain name, as seen in this example: - coltrane&prompt.root; ypinit -s ellington test-domain + coltrane&prompt.root; ypinit -s ellington test-domain Server Type: SLAVE Domain: test-domain Master: ellington @@ -1601,7 +1552,7 @@ Do you want this procedure to quit on non-fatal errors? [y/n: n] n Ok, please remember to go back and redo manually whatever fails. -If you don't, something might not work. +If not, something might not work. There will be no further questions. The remainder of the procedure should take a few minutes, to copy the databases from ellington. Transferring netgroup... @@ -1646,145 +1597,134 @@ ypxfr: Exiting: Map successfully transferred coltrane has been setup as an YP slave server without any errors. -Don't forget to update map ypservers on ellington. +Remember to update map ypservers on ellington. - You should now have a directory called - /var/yp/test-domain. Copies of the NIS - master server's maps should be in this directory. You will - need to make sure that these stay updated. The following - /etc/crontab entries on your slave - servers should do the job: + This will generate a directory on the slave server called + /var/yp/test-domain which contains copies + of the NIS master server's maps. Adding + these /etc/crontab entries on each slave + server will force the slaves to sync their maps with the maps + on the master server: - 20 * * * * root /usr/libexec/ypxfr passwd.byname + 20 * * * * root /usr/libexec/ypxfr passwd.byname 21 * * * * root /usr/libexec/ypxfr passwd.byuid - These two lines force the slave to sync its maps with - the maps on the master server. Although these entries are - not mandatory, since the master server attempts to ensure - any changes to its NIS maps are communicated to its slaves - and because password information is vital to systems - depending on the server, it is a good idea to force the - updates. This is more important on busy networks where map - updates might not always complete. 
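+      Other maps can be kept in sync in the same way.  As a
+      minimal sketch, assuming the standard group maps are also
+      served for this domain, matching entries could be added for
+      them as well:
+
+      22 * * * *        root   /usr/libexec/ypxfr group.byname
+23 * * * *        root   /usr/libexec/ypxfr group.bygid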
- - Now, run the command /etc/netstart on the - slave server as well, which again starts the NIS server. - - - - - NIS Clients + These entries are not mandatory because the master server + automatically attempts to push any map changes to its slaves. + However, since clients may depend upon the slave server to + provide correct password information, it is recommended to + force frequent password map updates. This is especially + important on busy networks where map updates might not always + complete. + + To finish the configuration, run + /etc/netstart on the slave server in order + to start the NIS services. + - An NIS client establishes what is called a binding to a - particular NIS server using the - ypbind daemon. - ypbind checks the system's default - domain (as set by the domainname command), - and begins broadcasting RPC requests on the local network. - These requests specify the name of the domain for which - ypbind is attempting to establish a binding. - If a server that has been configured to serve the requested - domain receives one of the broadcasts, it will respond to - ypbind, which will record the server's - address. If there are several servers available (a master and - several slaves, for example), ypbind will - use the address of the first one to respond. From that point - on, the client system will direct all of its NIS requests to - that server. ypbind will - occasionally ping the server to make sure it is - still up and running. If it fails to receive a reply to one of - its pings within a reasonable amount of time, - ypbind will mark the domain as unbound and - begin broadcasting again in the hopes of locating another - server. - - - Setting Up a NIS Client - - NIS - client configuration - - Setting up a FreeBSD machine to be a NIS client is fairly - straightforward. - - - - Edit the file /etc/rc.conf and - add the following lines in order to set the NIS domainname - and start ypbind upon network - startup: + + Setting Up an <acronym>NIS</acronym> Client + + An NIS client binds to an + NIS server using &man.ypbind.8;. This + daemon broadcasts RPC requests on the local network. These + requests specify the domain name configured on the client. If + an NIS server in the same domain receives + one of the broadcasts, it will respond to + ypbind, which will record the + server's address. If there are several servers available, + the client will use the address of the first server to respond + and will direct all of its NIS requests to + that server. The client will automatically + ping the server on a regular basis + to make sure it is still available. If it fails to receive a + reply within a reasonable amount of time, + ypbind will mark the domain as + unbound and begin broadcasting again in the hopes of locating + another server. - nisdomainname="test-domain" -nis_client_enable="YES" - + NIS + client configuration + - - To import all possible password entries from the NIS - server, remove all user accounts from your - /etc/master.passwd file and use - vipw to add the following line to - the end of the file: - - +::::::::: - - - This line will afford anyone with a valid account in - the NIS server's password maps an account. There are - many ways to configure your NIS client by changing this - line. See the netgroups - section below for more information. - For more detailed reading see O'Reilly's book on - Managing NFS and NIS. - - - - You should keep at least one local account (i.e. 
- not imported via NIS) in your - /etc/master.passwd and this - account should also be a member of the group - wheel. If there is something - wrong with NIS, this account can be used to log in - remotely, become root, and fix things. - - - - - To import all possible group entries from the NIS - server, add this line to your - /etc/group file: - - +:*:: - - - - After completing these steps, you should be able to run - ypcat passwd and see the NIS server's - passwd map. - - - + To configure a &os; machine to be an + NIS client: - - NIS Security + + + Edit /etc/rc.conf and add the + following lines in order to set the + NIS domain name and start + &man.ypbind.8; during network startup: - In general, any remote user can issue an RPC to - &man.ypserv.8; and retrieve the contents of your NIS maps, - provided the remote user knows your domainname. To prevent - such unauthorized transactions, &man.ypserv.8; supports a - feature called securenets which can be used to - restrict access to a given set of hosts. At startup, - &man.ypserv.8; will attempt to load the securenets information - from a file called - /var/yp/securenets. + nisdomainname="test-domain" +nis_client_enable="YES" + - - This path varies depending on the path specified with the - option. This file contains entries that - consist of a network specification and a network mask separated - by white space. Lines starting with # are - considered to be comments. A sample securenets file might look - like this: - + + To import all possible password entries from the + NIS server, use + vipw to remove all user accounts + except one from /etc/master.passwd. + When removing the accounts, keep in mind that at least one + local account should remain and this account should be a + member of wheel. If there is a + problem with NIS, this local account + can be used to log in remotely, become the superuser, and + fix the problem. Before saving the edits, add the + following line to the end of the file: + + +::::::::: + + This line configures the client to provide anyone with + a valid account in the NIS server's + password maps an account on the client. There are many + ways to configure the NIS client by + modifying this line. One method is described in . For more detailed + reading, refer to the book + Managing NFS and NIS, published by + O'Reilly Media. + + + + To import all possible group entries from the + NIS server, add this line to + /etc/group: + + +:*:: + + + + To start the NIS client immediately, + execute the following commands as the superuser: + + &prompt.root; /etc/netstart +&prompt.root; service ypbind start + + After completing these steps, running + ypcat passwd on the client should show + the server's passwd map. + + + + <acronym>NIS</acronym> Security + + Since RPC is a broadcast-based service, + any system running ypbind within + the same domain can retrieve the contents of the + NIS maps. To prevent unauthorized + transactions, &man.ypserv.8; supports a feature called + securenets which can be used to restrict access + to a given set of hosts. By default, this information is + stored in /var/yp/securenets, unless + &man.ypserv.8; is started with and an + alternate path. This file contains entries that consist of a + network specification and a network mask separated by white + space. Lines starting with # are + considered to be comments. 
A sample + securenets might look like this: # allow connections from local host -- mandatory 127.0.0.1 255.255.255.255 @@ -1800,82 +1740,65 @@ matches one of these rules, it will process the request normally. If the address fails to match a rule, the request will be ignored and a warning message will be logged. If the - /var/yp/securenets file does not exist, + securenets does not exist, ypserv will allow connections from any host. - The ypserv program also has support for - Wietse Venema's TCP Wrapper package. - This allows the administrator to use the - TCP Wrapper configuration files for - access control instead of - /var/yp/securenets. - - - While both of these access control mechanisms provide some - security, they, like the privileged port test, are - vulnerable to IP spoofing attacks. All - NIS-related traffic should be blocked at your firewall. - - Servers using /var/yp/securenets - may fail to serve legitimate NIS clients with archaic TCP/IP - implementations. Some of these implementations set all - host bits to zero when doing broadcasts and/or fail to - observe the subnet mask when calculating the broadcast - address. While some of these problems can be fixed by - changing the client configuration, other problems may force - the retirement of the client systems in question or the - abandonment of /var/yp/securenets. - - Using /var/yp/securenets on a - server with such an archaic implementation of TCP/IP is a - really bad idea and will lead to loss of NIS functionality - for large parts of your network. - - TCP Wrappers - The use of the TCP Wrapper - package increases the latency of your NIS server. The - additional delay may be long enough to cause timeouts in - client programs, especially in busy networks or with slow - NIS servers. If one or more of your client systems - suffers from these symptoms, you should convert the client - systems in question into NIS slave servers and force them - to bind to themselves. - - - - - Barring Some Users from Logging On + is an alternate mechanism + for providing access control instead of + securenets. While either access control + mechanism adds some security, they are both vulnerable to + IP spoofing attacks. All + NIS-related traffic should be blocked at + the firewall. + + Servers using securenets + may fail to serve legitimate NIS clients + with archaic TCP/IP implementations. Some of these + implementations set all host bits to zero when doing + broadcasts or fail to observe the subnet mask when + calculating the broadcast address. While some of these + problems can be fixed by changing the client configuration, + other problems may force the retirement of these client + systems or the abandonment of + securenets. + + TCP Wrapper + The use of TCP Wrapper + increases the latency of the NIS server. + The additional delay may be long enough to cause timeouts in + client programs, especially in busy networks with slow + NIS servers. If one or more clients suffer + from latency, convert those clients into + NIS slave servers and force them to bind to + themselves. - In our lab, there is a machine basie that - is supposed to be a faculty only workstation. We do not want - to take this machine out of the NIS domain, yet the - passwd file on the master NIS server - contains accounts for both faculty and students. What can we - do? - - There is a way to bar specific users from logging on to a - machine, even if they are present in the NIS database. 
To do - this, all you must do is add - -username to the - end of the /etc/master.passwd file on the - client machine, where username is - the username of the user you wish to bar from logging in. - This should preferably be done using vipw, - since vipw will sanity check your changes - to /etc/master.passwd, as well as - automatically rebuild the password database when you finish - editing. For example, if we wanted to bar user - bill from logging on to - basie we would: - - basie&prompt.root; vipw -[add -bill to the end, exit] -vipw: rebuilding the database... -vipw: done + + Barring Some Users -basie&prompt.root; cat /etc/master.passwd + In this example, the basie + system is a faculty workstation within the + NIS domain. The + passwd map on the master + NIS server contains accounts for both + faculty and students. This section demonstrates how to + allow faculty logins on this system while refusing student + logins. + + To prevent specified users from logging on to a system, + even if they are present in the NIS + database, use vipw to add + -username with + the correct number of colons towards the end of + /etc/master.passwd on the client, + where username is the username of + a user to bar from logging in. The line with the blocked + user must be before the + line that + allows NIS users. In this example, + bill is barred + from logging on to basie: + basie&prompt.root; cat /etc/master.passwd root:[password]:0:0::0:0:The super-user:/root:/bin/csh toor:[password]:0:0::0:0:The other super-user:/root:/bin/sh daemon:*:1:1::0:0:Owner of many system processes:/root:/sbin/nologin @@ -1891,169 +1814,160 @@ xten:*:67:67::0:0:X-10 daemon:/usr/local/xten:/sbin/nologin pop:*:68:6::0:0:Post Office Owner:/nonexistent:/sbin/nologin nobody:*:65534:65534::0:0:Unprivileged user:/nonexistent:/sbin/nologin +-bill::::::::: +::::::::: --bill basie&prompt.root; + - Using Netgroups - - UdoErdelhoffContributed by - - + + + Using Netgroups - netgroups - The method shown in the previous section works reasonably - well if you need special rules for a very small number of - users and/or machines. On larger networks, you - will forget to bar some users from logging - onto sensitive machines, or you may even have to modify each - machine separately, thus losing the main benefit of NIS: - centralized administration. - - The NIS developers' solution for this problem is called - netgroups. Their purpose and semantics - can be compared to the normal groups used by &unix; file - systems. The main differences are the lack of a numeric ID - and the ability to define a netgroup by including both user - accounts and other netgroups. + Barring specified users from logging on to individual + systems becomes unscaleable on larger networks and quickly + loses the main benefit of NIS: + centralized administration. Netgroups were developed to handle large, complex networks - with hundreds of users and machines. On one hand, this is - a Good Thing if you are forced to deal with such a situation. - On the other hand, this complexity makes it almost impossible to - explain netgroups with really simple examples. The example - used in the remainder of this section demonstrates this - problem. - - Let us assume that your successful introduction of NIS in - your laboratory caught your superiors' interest. Your next - job is to extend your NIS domain to cover some of the other - machines on campus. The two tables contain the names of the - new users and new machines as well as brief descriptions of - them. 
+ with hundreds of users and machines. Their use is comparable + to &unix; groups, where the main difference is the lack of a + numeric ID and the ability to define a netgroup by including + both user accounts and other netgroups. + + To expand on the example used in this chapter, the + NIS domain will be extended to add the + users and systems shown in Tables 28.2 and 28.3: - - - - - User Name(s) - Description - - - - - - alpha, beta - Normal employees of the IT department - - - - charlie, delta - The new apprentices of the IT department - - - - echo, foxtrott, golf, ... - Ordinary employees - - - - able, baker, ... - The current interns - - - - + + Additional Users - - - - - Machine Name(s) - Description - - - - - - - - war, death, - famine, - pollution - Your most important servers. Only the IT - employees are allowed to log onto these - machines. - - - - - pride, greed, - envy, wrath, - lust, sloth - Less important servers. All members of the IT - department are allowed to login onto these - machines. - - - - one, two, - three, four, - ... - - Ordinary workstations. Only the - real employees are allowed to use - these machines. - - - - trashcan - A very old machine without any critical data. - Even the intern is allowed to use this box. - - - - + + + + User Name(s) + Description + + + + + + alpha, + beta + IT department employees + + + + charlie, delta + IT department apprentices + + + + echo, + foxtrott, + golf, + ... + employees + + + + able, + baker, + ... + interns + + + +
+ + + Additional Systems + + + + + Machine Name(s) + Description + + + + + + + war, + death, + famine, + pollution + Only IT employees are allowed to log onto these + servers. + + + + + pride, + greed, + envy, + wrath, + lust, + sloth + All members of the IT department are allowed to + login onto these servers. + + + + one, + two, + three, + four, + ... + Ordinary workstations used by + employees. + + + + trashcan + A very old machine without any critical data. + Even interns are allowed to use this system. + + + +
- If you tried to implement these restrictions by separately - blocking each user, you would have to add one - -user line to - each system's passwd for each user who is - not allowed to login onto that system. If you forget just one - entry, you could be in trouble. It may be feasible to do this - correctly during the initial setup, however you - will eventually forget to add the lines - for new users during day-to-day operations. After all, Murphy - was an optimist. - - Handling this situation with netgroups offers several - advantages. Each user need not be handled separately; you - assign a user to one or more netgroups and allow or forbid - logins for all members of the netgroup. If you add a new - machine, you will only have to define login restrictions for - netgroups. If a new user is added, you will only have to add - the user to one or more netgroups. Those changes are - independent of each other: no more for each combination - of user and machine do... If your NIS setup is planned - carefully, you will only have to modify exactly one central - configuration file to grant or deny access to machines. - - The first step is the initialization of the NIS map - netgroup. FreeBSD's &man.ypinit.8; does not create this map by - default, but its NIS implementation will support it once it has - been created. To create an empty map, simply type - - ellington&prompt.root; vi /var/yp/netgroup - - and start adding content. For our example, we need at - least four netgroups: IT employees, IT apprentices, normal - employees and interns. + When using netgroups to configure this scenario, each user + is assigned to one or more netgroups and logins are then + allowed or forbidden for all members of the netgroup. When + adding a new machine, login restrictions must be defined for + all netgroups. When a new user is added, the account must be + added to one or more netgroups. If the + NIS setup is planned carefully, only one + central configuration file needs modification to grant or deny + access to machines. + + The first step is the initialization of the + NIS netgroup map. In + &os;, this map is not created by default. On the + NIS master server, use an editor to create + a map named /var/yp/netgroup. + + This example creates four netgroups to represent IT + employees, IT apprentices, employees, and interns: IT_EMP (,alpha,test-domain) (,beta,test-domain) IT_APP (,charlie,test-domain) (,delta,test-domain) @@ -2061,86 +1975,81 @@ (,golf,test-domain) INTERNS (,able,test-domain) (,baker,test-domain) - IT_EMP, IT_APP etc. - are the names of the netgroups. Each bracketed group adds - one or more user accounts to it. The three fields inside a - group are: + Each entry configures a netgroup. The first column in an + entry is the name of the netgroup. Each set of brackets + represents either a group of one or more users or the name of + another netgroup. When specifying a user, the three + comma-delimited fields inside each group represent: - - The name of the host(s) where the following items are - valid. If you do not specify a hostname, the entry is - valid on all hosts. If you do specify a hostname, you - will enter a realm of darkness, horror and utter confusion. - - - - The name of the account that belongs to this - netgroup. - - - - The NIS domain for the account. You can import - accounts from other NIS domains into your netgroup if you - are one of the unlucky fellows with more than one NIS - domain. - + + The name of the host(s) where the other fields + representing the user are valid. 
If a hostname is not + specified, the entry is valid on all hosts. + + + + The name of the account that belongs to this + netgroup. + + + + The NIS domain for the account. + Accounts may be imported from other NIS + domains into a netgroup. + - Each of these fields can contain wildcards. See - &man.netgroup.5; for details. + If a group contains multiple users, separate each user + with whitespace. Additionally, each field may contain + wildcards. See &man.netgroup.5; for details. - - netgroups - Netgroup names longer than 8 characters should not be - used, especially if you have machines running other - operating systems within your NIS domain. The names are - case sensitive; using capital letters for your netgroup - names is an easy way to distinguish between user, machine - and netgroup names. - - Some NIS clients (other than FreeBSD) cannot handle - netgroups with a large number of entries. For example, some - older versions of &sunos; start to cause trouble if a netgroup - contains more than 15 entries. You can - circumvent this limit by creating several sub-netgroups with - 15 users or less and a real netgroup that consists of the - sub-netgroups: + netgroups + Netgroup names longer than 8 characters should not be + The names are case sensitive and using capital letters + for netgroup names is an easy way to distinguish + between user, machine and netgroup names. + + Some non-&os; NIS clients cannot + handle netgroups containing more than 15 entries. This + limit may be circumvented by creating several sub-netgroups + with 15 users or fewer and a real netgroup consisting of the + sub-netgroups, as seen in this example: - BIGGRP1 (,joe1,domain) (,joe2,domain) (,joe3,domain) [...] + BIGGRP1 (,joe1,domain) (,joe2,domain) (,joe3,domain) [...] BIGGRP2 (,joe16,domain) (,joe17,domain) [...] BIGGRP3 (,joe31,domain) (,joe32,domain) BIGGROUP BIGGRP1 BIGGRP2 BIGGRP3 - You can repeat this process if you need more than 225 - users within a single netgroup. - + Repeat this process if more than 225 (15 times 15) users + exist within a single netgroup. - Activating and distributing your new NIS map is - easy: + To activate and distribute the new + NIS map: ellington&prompt.root; cd /var/yp ellington&prompt.root; make - This will generate the three NIS maps - netgroup, - netgroup.byhost and - netgroup.byuser. Use &man.ypcat.1; to - check if your new NIS maps are available: + This will generate the three NIS maps + netgroup, + netgroup.byhost and + netgroup.byuser. Use the map key option + of &man.ypcat.1; to check if the new NIS + maps are available: ellington&prompt.user; ypcat -k netgroup ellington&prompt.user; ypcat -k netgroup.byhost ellington&prompt.user; ypcat -k netgroup.byuser The output of the first command should resemble the - contents of /var/yp/netgroup. The second - command will not produce output if you have not specified - host-specific netgroups. The third command can be used to - get the list of netgroups for a user. - - The client setup is quite simple. To configure the server - war, you only have to start - &man.vipw.8; and replace the line + contents of /var/yp/netgroup. The second + command only produces output if host-specific netgroups were + created. The third command is used to get the list of + netgroups for a user. + + To configure a client, use &man.vipw.8; to specify the + name of the netgroup. 
For example, on the server named + war, replace this line: +::::::::: @@ -2148,115 +2057,95 @@ +@IT_EMP::::::::: - Now, only the data for the users defined in the netgroup - IT_EMP is imported into - war's password database and only - these users are allowed to login. + This specifies that only the users defined in the netgroup + IT_EMP will be imported into this system's + password database and only those users are allowed to login to + this system. - Unfortunately, this limitation also applies to the + This configuration also applies to the ~ function of the shell and all routines - converting between user names and numerical user IDs. In - other words, cd - ~user will not work, - ls -l will show the numerical ID instead of - the username and find . -user joe -print - will fail with No such user. To fix - this, you will have to import all user entries - without allowing them to login onto your - servers. - - This can be achieved by adding another line to - /etc/master.passwd. This line should - contain: - - +:::::::::/sbin/nologin, meaning - Import all entries but replace the shell with - /sbin/nologin in the imported - entries. You can replace any field in the - passwd entry by placing a default value in - your /etc/master.passwd. + which convert between user names and numerical user IDs. In + other words, + cd ~user will + not work, ls -l will show the numerical ID + instead of the username, and find . -user joe + -print will fail with the message + No such user. To fix this, import all + user entries without allowing them to login into the servers. + This can be achieved by adding an extra line: + + +:::::::::/sbin/nologin + + This line configures the client to import all entries but + to replace the shell in those entries with + /sbin/nologin. - - Make sure that the line - +:::::::::/sbin/nologin is placed after - +@IT_EMP:::::::::. Otherwise, all user - accounts imported from NIS will have /sbin/nologin as their - login shell. - - - After this change, you will only have to change one NIS - map if a new employee joins the IT department. You could use - a similar approach for the less important servers by replacing - the old +::::::::: in their local version - of /etc/master.passwd with something like - this: + Make sure that extra line is placed + after + +@IT_EMP:::::::::. Otherwise, all user + accounts imported from NIS will have + /sbin/nologin as their login + shell and no one will be able to login to the system. + + To configure the less important servers, replace the old + +::::::::: on the servers with these + lines: +@IT_EMP::::::::: +@IT_APP::::::::: +:::::::::/sbin/nologin - The corresponding lines for the normal workstations - could be: + The corresponding lines for the workstations + would be: +@IT_EMP::::::::: +@USERS::::::::: +:::::::::/sbin/nologin - And everything would be fine until there is a policy - change a few weeks later: The IT department starts hiring - interns. The IT interns are allowed to use the normal - workstations and the less important servers; and the IT - apprentices are allowed to login onto the main servers. You - add a new netgroup IT_INTERN, add the new - IT interns to this netgroup and start to change the - configuration on each and every machine... As the old saying - goes: Errors in centralized planning lead to global - mess. - - NIS' ability to create netgroups from other netgroups can - be used to prevent situations like these. One possibility - is the creation of role-based netgroups. 
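+      Whether a given account has been imported with a usable
+      shell can be checked directly on a client.  This is a quick
+      sketch, assuming &man.getent.1; is available and using the
+      example accounts alpha and
+      able from the tables above:
+
+      &prompt.user; getent passwd alpha
+&prompt.user; getent passwd able
+
+      On a workstation configured as shown,
+      alpha should be listed with a normal
+      login shell, while able, who is only
+      matched by the catch-all line, should appear with
+      /sbin/nologin.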
For example, you - could create a netgroup called - BIGSRV to define the login - restrictions for the important servers, another netgroup - called SMALLSRV for the less - important servers and a third netgroup called - USERBOX for the normal - workstations. Each of these netgroups contains the netgroups - that are allowed to login onto these machines. The new - entries for your NIS map netgroup should look like this: + NIS supports the creation of netgroups from other + netgroups which can be useful if the policy regarding user + access changes. One possibility is the creation of role-based + netgroups. For example, one might create a netgroup called + BIGSRV to define the login restrictions for + the important servers, another netgroup called + SMALLSRV for the less important servers, + and a third netgroup called USERBOX for the + workstations. Each of these netgroups contains the netgroups + that are allowed to login onto these machines. The new + entries for the NIS + netgroup map would look like this: BIGSRV IT_EMP IT_APP SMALLSRV IT_EMP IT_APP ITINTERN USERBOX IT_EMP ITINTERN USERS This method of defining login restrictions works - reasonably well if you can define groups of machines with - identical restrictions. Unfortunately, this is the exception - and not the rule. Most of the time, you will need the ability - to define login restrictions on a per-machine basis. - - Machine-specific netgroup definitions are the other - possibility to deal with the policy change outlined above. In - this scenario, the /etc/master.passwd of - each box contains two lines starting with +. - The first of them adds a netgroup with the accounts allowed to - login onto this machine, the second one adds all other - accounts with /sbin/nologin as shell. It - is a good idea to use the ALL-CAPS version of - the machine name as the name of the netgroup. In other words, - the lines should look like this: + reasonably well when it is possible to define groups of + machines with identical restrictions. Unfortunately, this is + the exception and not the rule. Most of the time, the ability + to define login restrictions on a per-machine basis is + required. + + Machine-specific netgroup definitions are another + possibility to deal with the policy changes. In this + scenario, the /etc/master.passwd of each + system contains two lines starting with +. + The first line adds a netgroup with the accounts allowed to + login onto this machine and the second line adds all other + accounts with /sbin/nologin as shell. It + is recommended to use the ALL-CAPS version of + the hostname as the name of the netgroup: +@BOXNAME::::::::: +:::::::::/sbin/nologin - Once you have completed this task for all your machines, - you will not have to modify the local versions of - /etc/master.passwd ever again. All - further changes can be handled by modifying the NIS map. Here - is an example of a possible netgroup map for this - scenario with some additional goodies: + Once this task is completed on all the machines, there is + no longer a need to modify the local versions of + /etc/master.passwd ever again. All + further changes can be handled by modifying the + NIS map. 
Here is an example of a possible + netgroup map for this scenario: # Define groups of users first IT_EMP (,alpha,test-domain) (,beta,test-domain) @@ -2294,802 +2183,1059 @@ TWO (,hotel,test-domain) # [...more groups to follow] - If you are using some kind of database to manage your user - accounts, you should be able to create the first part of the - map with your database's report tools. This way, new users - will automatically have access to the boxes. - - One last word of caution: It may not always be advisable - to use machine-based netgroups. If you are deploying a couple of - dozen or even hundreds of identical machines for student labs, - you should use role-based netgroups instead of machine-based - netgroups to keep the size of the NIS map within reasonable - limits. -
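+    After the map has been rebuilt with make in
+    /var/yp, an individual netgroup can be
+    checked from any client with ypmatch.
+    This is a hypothetical check using the
+    IT_EMP group defined above:
+
+    &prompt.user; ypmatch IT_EMP netgroup
+
+    The output should list the members of the group in the same
+    format used in the map itself.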
- - - Important Things to Remember - - There are still a couple of things that you will need to do - differently now that you are in an NIS environment. - - - - Every time you wish to add a user to the lab, you - must add it to the master NIS server only, - and you must remember to rebuild the NIS - maps. If you forget to do this, the new user will - not be able to login anywhere except on the NIS master. - For example, if we needed to add a new user - jsmith to the lab, we would: - - &prompt.root; pw useradd jsmith -&prompt.root; cd /var/yp -&prompt.root; make test-domain - - You could also run adduser jsmith instead - of pw useradd jsmith. - - - Keep the administration accounts out of the - NIS maps. You do not want to be propagating - administrative accounts and passwords to machines that - will have users that should not have access to those - accounts. - - - Keep the NIS master and slave secure, and - minimize their downtime. If somebody either - hacks or simply turns off these machines, they have - effectively rendered many people without the ability to - login to the lab. - - This is the chief weakness of any centralized administration - system. If you do - not protect your NIS servers, you will have a lot of angry - users! - - - - - - NIS v1 Compatibility - - FreeBSD's ypserv has some - support for serving NIS v1 clients. FreeBSD's NIS - implementation only uses the NIS v2 protocol, however other - implementations include support for the v1 protocol for - backwards compatibility with older systems. The - ypbind daemons supplied with these - systems will try to establish a binding to an NIS v1 server - even though they may never actually need it (and they may - persist in broadcasting in search of one even after they - receive a response from a v2 server). Note that while support - for normal client calls is provided, this version of - ypserv does not handle v1 map - transfer requests; consequently, it cannot be used as a master - or slave in conjunction with older NIS servers that only - support the v1 protocol. Fortunately, there probably are not - any such servers still in use today. - - - - NIS Servers That Are Also NIS Clients - - Care must be taken when running - ypserv in a multi-server domain - where the server machines are also NIS clients. It is - generally a good idea to force the servers to bind to - themselves rather than allowing them to broadcast bind - requests and possibly become bound to each other. Strange - failure modes can result if one server goes down and others - are dependent upon it. Eventually all the clients will time - out and attempt to bind to other servers, but the delay - involved can be considerable and the failure mode is still - present since the servers might bind to each other all over - again. - - You can force a host to bind to a particular server by running - ypbind with the - flag. If you do not want to do this manually each time you - reboot your NIS server, you can add the following lines to - your /etc/rc.conf: - - nis_client_enable="YES" # run client stuff as well -nis_client_flags="-S NIS domain,server" - - See &man.ypbind.8; for further information. + It may not always be advisable + to use machine-based netgroups. When deploying a couple of + dozen or hundreds of systems, + role-based netgroups instead of machine-based netgroups may be + used to keep the size of the NIS map within + reasonable limits. 
Password Formats + - NIS - password formats + NIS + password formats - One of the most common issues that people run into when trying - to implement NIS is password format compatibility. If your NIS - server is using DES encrypted passwords, it will only support - clients that are also using DES. For example, if you have - &solaris; NIS clients in your network, then you will almost certainly - need to use DES encrypted passwords. - - To check which format your servers - and clients are using, look at /etc/login.conf. - If the host is configured to use DES encrypted passwords, then the - default class will contain an entry like this: + NIS requires that all hosts within an + NIS domain use the same format for + encrypting passwords. If users have trouble authenticating on + an NIS client, it may be due to a differing + password format. In a heterogeneous network, the format must + be supported by all operating systems, where + DES is the lowest common standard. + + To check which format a server or client is using, look + at this section of + /etc/login.conf: default:\ :passwd_format=des:\ :copyright=/etc/COPYRIGHT:\ [Further entries elided] - Other possible values for the passwd_format - capability include blf and md5 - (for Blowfish and MD5 encrypted passwords, respectively). - - If you have made changes to - /etc/login.conf, you will also need to - rebuild the login capability database, which is achieved by - running the following command as - root: + In this example, the system is using the + DES format. Other possible values are + blf for Blowfish and md5 + for MD5 encrypted passwords. + + If the format on a host needs to be edited to match the + one being used in the NIS domain, the + login capability database must be rebuilt after saving the + change: &prompt.root; cap_mkdb /etc/login.conf - The format of passwords already in - /etc/master.passwd will not be updated - until a user changes his password for the first time - after the login capability database is - rebuilt. - - Next, in order to ensure that passwords are encrypted with - the format that you have chosen, you should also check that - the crypt_default in - /etc/auth.conf gives precedence to your - chosen password format. To do this, place the format that you - have chosen first in the list. For example, when using DES - encrypted passwords, the entry would be: - - crypt_default = des blf md5 - - Having followed the above steps on each of the &os; based - NIS servers and clients, you can be sure that they all agree - on which password format is used within your network. If you - have trouble authenticating on an NIS client, this is a pretty - good place to start looking for possible problems. Remember: - if you want to deploy an NIS server for a heterogenous - network, you will probably have to use DES on all systems - because it is the lowest common standard. + + The format of passwords for existing user accounts will + not be updated until each user changes their password + after the login capability database is + rebuilt. +
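+      As a short sketch, assuming the domain has standardized on
+      Blowfish hashes, the passwd_format entry
+      in the default class of
+      /etc/login.conf on each &os; system
+      would be changed to:
+
+      :passwd_format=blf:\
+
+      The login capability database is then rebuilt with
+      cap_mkdb as shown above, and each user
+      runs &man.passwd.1; once so that the stored hash is
+      regenerated in the new format:
+
+      &prompt.user; passwd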
- - Automatic Network Configuration (DHCP) + + + Lightweight Directory Access Protocol + (<acronym>LDAP</acronym>) + - GregSutterWritten by + + + Tom + Rhodes + + Written by + - - - - What Is DHCP? - - Dynamic Host Configuration Protocol - DHCP - - - Internet Software Consortium (ISC) - - DHCP, the Dynamic Host Configuration Protocol, describes - the means by which a system can connect to a network and obtain the - necessary information for communication upon that network. FreeBSD - versions prior to 6.0 use the ISC (Internet Software - Consortium) DHCP client (&man.dhclient.8;) implementation. - Later versions use the OpenBSD dhclient - taken from OpenBSD 3.7. All - information here regarding dhclient is for - use with either of the ISC or OpenBSD DHCP clients. The DHCP - server is the one included in the ISC distribution. - + LDAP - - What This Section Covers + The Lightweight Directory Access Protocol + (LDAP) is an application layer protocol used + to access, modify, and authenticate objects using a distributed + directory information service. Think of it as a phone or record + book which stores several levels of hierarchical, homogeneous + information. It is used in Active Directory and + OpenLDAP networks and allows users to + access to several levels of internal information utilizing a + single account. For example, email authentication, pulling + employee contact information, and internal website + authentication might all make use of a single user account in + the LDAP server's record base. + + This section provides a quick start guide for configuring an + LDAP server on a &os; system. It assumes + that the administrator already has a design plan which includes + the type of information to store, what that information will be + used for, which users should have access to that information, + and how to secure this information from unauthorized + access. + + + <acronym>LDAP</acronym> Terminology and Structure + + LDAP uses several terms which should be + understood before starting the configuration. All directory + entries consist of a group of + attributes. Each of these attribute + sets contains a unique identifier known as a + Distinguished Name + (DN) which is normally built from several + other attributes such as the common or + Relative Distinguished Name + (RDN). Similar to how directories have + absolute and relative paths, consider a DN + as an absolute path and the RDN as the + relative path. + + An example LDAP entry looks like the + following. This example searches for the entry for the + specified user account (uid), + organizational unit (ou), and organization + (o): - This section describes both the client-side components of the ISC and OpenBSD DHCP client and - server-side components of the ISC DHCP system. The - client-side program, dhclient, comes - integrated within FreeBSD, and the server-side portion is - available from the net/isc-dhcp3-server port. The - &man.dhclient.8;, &man.dhcp-options.5;, and - &man.dhclient.conf.5; manual pages, in addition to the - references below, are useful resources. - + &prompt.user; ldapsearch -xb "uid=trhodes,ou=users,o=example.com" +# extended LDIF +# +# LDAPv3 +# base <uid=trhodes,ou=users,o=example.com> with scope subtree +# filter: (objectclass=*) +# requesting: ALL +# - - How It Works - UDP - When dhclient, the DHCP client, is - executed on the client machine, it begins broadcasting - requests for configuration information. By default, these - requests are on UDP port 68. 
The server replies on UDP 67, - giving the client an IP address and other relevant network - information such as netmask, router, and DNS servers. All of - this information comes in the form of a DHCP - lease and is only valid for a certain time - (configured by the DHCP server maintainer). In this manner, - stale IP addresses for clients no longer connected to the - network can be automatically reclaimed. - - DHCP clients can obtain a great deal of information from - the server. An exhaustive list may be found in - &man.dhcp-options.5;. - +# trhodes, users, example.com +dn: uid=trhodes,ou=users,o=example.com +mail: trhodes@example.com +cn: Tom Rhodes +uid: trhodes +telephoneNumber: (123) 456-7890 + +# search result +search: 2 +result: 0 Success + +# numResponses: 2 +# numEntries: 1 + + This example entry shows the values for the + dn, mail, + cn, uid, and + telephoneNumber attributes. The + cn attribute is the + RDN. + + More information about LDAP and its + terminology can be found at http://www.openldap.org/doc/admin24/intro.html. + + + + Configuring an <acronym>LDAP</acronym> Server + + LDAP Server + + &os; does not provide a built-in LDAP + server. Begin the configuration by installing the net/openldap24-server package or port. + Since the port has many configurable options, it is + recommended that the default options are reviewed to see if + the package is sufficient, and to instead compile the port if + any options should be changed. In most cases, the defaults + are fine. However, if SQL support is needed, this option must + be enabled and the port compiled using the instructions in + . + + Next, create the directories to hold the data and to store + the certificates: + + &prompt.root; mkdir /var/db/openldap-data +&prompt.root; mkdir /usr/local/etc/openldap/private + + Copy over the database configuration file: + + &prompt.root; cp /usr/local/etc/openldap/DB_CONFIG.example /var/db/openldap-data/DB_CONFIG + + The next phase is to configure the certificate authority. + The following commands must be executed from + /usr/local/etc/openldap/private. This is + important as the file permissions need to be restrictive and + users should not have access to these files. To create the + certificate authority, start with this command and follow the + prompts: + + &prompt.root; openssl req -days 365 -nodes -new -x509 -keyout ca.key -out ../ca.crt + + The entries for the prompts may be generic + except for the + Common Name. This entry must be + different than the system hostname. If + this will be a self signed certificate, prefix the hostname + with CA for certificate authority. + + The next task is to create a certificate signing request + and a private key. Input this command and follow the + prompts: + + &prompt.root; openssl req -days 365 -nodes -new -keyout server.key -out server.csr + + During the certificate generation process, be sure to + correctly set the Common Name attribute. + Once complete, sign the key: + + &prompt.root; openssl x509 -req -days 365 -in server.csr -out ../server.crt -CA ../ca.crt -CAkey ca.key -CAcreateserial + + The final part of the certificate generation process is to + generate and sign the client certificates: + + &prompt.root; openssl req -days 365 -nodes -new -keyout client.key -out client.csr +&prompt.root; openssl x509 -req -days 3650 -in client.csr -out ../client.crt -CA ../ca.crt -CAkey ca.key + + Remember to use the same Common Name + attribute when prompted. 
When finished, ensure that a total + of eight (8) new files have been generated through the + proceeding commands. If so, the next step is to edit + /usr/local/etc/openldap/slapd.conf and + add the following options: + + TLSCipherSuite HIGH:MEDIUM:+SSLv3 +TLSCertificateFile /usr/local/etc/openldap/server.crt +TLSCertificateKeyFile /usr/local/etc/openldap/private/server.key +TLSCACertificateFile /usr/local/etc/openldap/ca.crt + + Then, edit + /usr/local/etc/openldap/ldap.conf and add + the following lines: + + TLS_CACERT /usr/local/etc/openldap/ca.crt +TLS_CIPHER_SUITE HIGH:MEDIUM:+SSLv3 + + While editing this file, uncomment the following entries + and set them to the desired values: , + , and + . Set the to + contain and + . Then, add two entries pointing to + the certificate authority. When finished, the entries should + look similar to the following: + + BASE dc=example,dc=com +URI ldap:// ldaps:// + +SIZELIMIT 12 +TIMELIMIT 15 + +TLS_CACERT /usr/local/etc/openldap/ca.crt +TLS_CIPHER_SUITE HIGH:MEDIUM:+SSLv3 + + The default password for the server should then be + changed: + + &prompt.root; slappasswd -h "{SHA}" >> /usr/local/etc/openldap/slapd.conf + + This command will prompt for the password and, if the + process does not fail, a password hash will be added to the + end of slapd.conf. Several hashing + formats are supported. Refer to the manual page for + slappasswd for more information. + + Next, edit + /usr/local/etc/openldap/slapd.conf and + add the following lines: + + password-hash {sha} +allow bind_v2 + + The in this file must be updated + to match the used in + /usr/local/etc/openldap/ldap.conf and + should also be set. A recommended + value for is something like + . Before saving this file, place + the in front of the password output + from slappasswd and delete the old + . The end result should + look similar to this: + + TLSCipherSuite HIGH:MEDIUM:+SSLv3 +TLSCertificateFile /usr/local/etc/openldap/server.crt +TLSCertificateKeyFile /usr/local/etc/openldap/private/server.key +TLSCACertificateFile /usr/local/etc/openldap/ca.crt +rootpw {SHA}W6ph5Mm5Pz8GgiULbPgzG37mj9g= + + Finally, enable the OpenLDAP + service in /etc/rc.conf and set the + URI: + + slapd_enable="YES" +slapd_flags="-4 -h ldaps:///" + + At this point the server can be started and tested: + + &prompt.root; service slapd start + + If everything is configured correctly, a search of the + directory should show a successful connection with a single + response as in this example: - - FreeBSD Integration + &prompt.root; ldapsearch -Z +# extended LDIF +# +# LDAPv3 +# base <dc=example,dc=com> (default) with scope subtree +# filter: (objectclass=*) +# requesting: ALL +# - &os; fully integrates the ISC or OpenBSD DHCP client, - dhclient (according to the &os; version you run). DHCP client support is provided - within both the installer and the base system, obviating the need - for detailed knowledge of network configurations on any network - that runs a DHCP server. dhclient has been - included in all FreeBSD distributions since 3.2. - - sysinstall - - - DHCP is supported by - sysinstall. When configuring a - network interface within - sysinstall, the second question - asked is: Do you want to try DHCP configuration of - the interface?. Answering affirmatively will - execute dhclient, and if successful, will - fill in the network configuration information - automatically. 
- - There are two things you must do to have your system use - DHCP upon startup: - - DHCP - requirements - - - - Make sure that the bpf - device is compiled into your kernel. To do this, add - device bpf (pseudo-device - bpf under &os; 4.X) to your kernel - configuration file, and rebuild the kernel. For more - information about building kernels, see . The - bpf device is already part of - the GENERIC kernel that is supplied - with FreeBSD, so if you do not have a custom kernel, you - should not need to create one in order to get DHCP - working. - - For those who are particularly security conscious, - you should be warned that bpf - is also the device that allows packet sniffers to work - correctly (although they still have to be run as - root). bpf - is required to use DHCP, but if - you are very sensitive about security, you probably - should not add bpf to your - kernel in the expectation that at some point in the - future you will be using DHCP. - - - - Edit your /etc/rc.conf to - include the following: - - ifconfig_fxp0="DHCP" - - - Be sure to replace fxp0 with the - designation for the interface that you wish to dynamically - configure, as described in - . - - - If you are using a different location for - dhclient, or if you wish to pass additional - flags to dhclient, also include the - following (editing as necessary): - - dhcp_program="/sbin/dhclient" -dhcp_flags="" - - - - - DHCP - server - - The DHCP server, dhcpd, is included - as part of the net/isc-dhcp3-server port in the ports - collection. This port contains the ISC DHCP server and - documentation. - +# search result +search: 3 +result: 32 No such object - - Files - - DHCP - configuration files - - - /etc/dhclient.conf - dhclient requires a configuration file, - /etc/dhclient.conf. Typically the file - contains only comments, the defaults being reasonably sane. This - configuration file is described by the &man.dhclient.conf.5; - manual page. - - - /sbin/dhclient - dhclient is statically linked and - resides in /sbin. The &man.dhclient.8; - manual page gives more information about - dhclient. - - - /sbin/dhclient-script - dhclient-script is the FreeBSD-specific - DHCP client configuration script. It is described in - &man.dhclient-script.8;, but should not need any user - modification to function properly. - - - /var/db/dhclient.leases - The DHCP client keeps a database of valid leases in this - file, which is written as a log. &man.dhclient.leases.5; - gives a slightly longer description. - - - +# numResponses: 1 - - Further Reading + + If the command fails and the configuration looks + correct, stop the slapd service and + restart it with debugging options: - The DHCP protocol is fully described in - RFC 2131. - An informational resource has also been set up at - http://www.dhcp.org/. - + &prompt.root; service slapd stop +&prompt.root; /usr/local/libexec/slapd -d -1 + - - Installing and Configuring a DHCP Server + Once the service is responding, the directory can be + populated using ldapadd. In this example, + a file containing this list of users is first created. Each + user should use the following format: + + dn: dc=example,dc=com +objectclass: dcObject +objectclass: organization +o: Example +dc: Example + +dn: cn=Manager,dc=example,dc=com +objectclass: organizationalRole +cn: Manager + + To import this file, specify the file name. 
The following + command will prompt for the password specified earlier and the + output should look something like this: + + &prompt.root; ldapadd -Z -D "cn=Manager,dc=example,dc=com" -W -f import.ldif +Enter LDAP Password: +adding new entry "dc=example,dc=com" - - What This Section Covers +adding new entry "cn=Manager,dc=example,dc=com" - This section provides information on how to configure - a FreeBSD system to act as a DHCP server using the ISC - (Internet Software Consortium) implementation of the DHCP - suite. - - The server portion of the suite is not provided as part of - FreeBSD, and so you will need to install the - net/isc-dhcp3-server - port to provide this service. See for - more information on using the Ports Collection. - - - - DHCP Server Installation - - DHCP - installation - - In order to configure your FreeBSD system as a DHCP - server, you will need to ensure that the &man.bpf.4; - device is compiled into your kernel. To do this, add - device bpf (pseudo-device - bpf under &os; 4.X) to your kernel - configuration file, and rebuild the kernel. For more - information about building kernels, see . - - The bpf device is already - part of the GENERIC kernel that is - supplied with FreeBSD, so you do not need to create a custom - kernel in order to get DHCP working. - - - Those who are particularly security conscious - should note that bpf - is also the device that allows packet sniffers to work - correctly (although such programs still need privileged - access). bpf - is required to use DHCP, but if - you are very sensitive about security, you probably - should not include bpf in your - kernel purely because you expect to use DHCP at some - point in the future. - - - The next thing that you will need to do is edit the sample - dhcpd.conf which was installed by the - net/isc-dhcp3-server port. - By default, this will be - /usr/local/etc/dhcpd.conf.sample, and you - should copy this to - /usr/local/etc/dhcpd.conf before proceeding - to make changes. - - - - Configuring the DHCP Server - - DHCP - dhcpd.conf - - dhcpd.conf is - comprised of declarations regarding subnets and hosts, and is - perhaps most easily explained using an example : + Verify the data was added by issuing a search on the + server using ldapsearch: - option domain-name "example.com"; -option domain-name-servers 192.168.4.100; -option subnet-mask 255.255.255.0; + &prompt.user; ldapsearch -Z +# extended LDIF +# +# LDAPv3 +# base <dc=example,dc=com> (default) with scope subtree +# filter: (objectclass=*) +# requesting: ALL +# -default-lease-time 3600; -max-lease-time 86400; -ddns-update-style none; +# example.com +dn: dc=example,dc=com +objectClass: dcObject +objectClass: organization +o: Example +dc: Example + +# Manager, example.com +dn: cn=Manager,dc=example,dc=com +objectClass: organizationalRole +cn: Manager + +# search result +search: 3 +result: 0 Success -subnet 192.168.4.0 netmask 255.255.255.0 { - range 192.168.4.129 192.168.4.254; - option routers 192.168.4.1; -} +# numResponses: 3 +# numEntries: 2 -host mailhost { - hardware ethernet 02:03:04:05:06:07; - fixed-address mailhost.example.com; -} + At this point, the server should be configured and + functioning properly. + + - - - This option specifies the domain that will be provided - to clients as the default search domain. See - &man.resolv.conf.5; for more information on what this - means. - - - - This option specifies a comma separated list of DNS - servers that the client should use. - - - - The netmask that will be provided to clients. 
- - - - A client may request a specific length of time that a - lease will be valid. Otherwise the server will assign - a lease with this expiry value (in seconds). - - - - This is the maximum length of time that the server will - lease for. Should a client request a longer lease, a lease - will be issued, although it will only be valid for - max-lease-time seconds. - - - - This option specifies whether the DHCP server should - attempt to update DNS when a lease is accepted or released. - In the ISC implementation, this option is - required. - - - - This denotes which IP addresses should be used in - the pool reserved for allocating to clients. IP - addresses between, and including, the ones stated are - handed out to clients. - - - - Declares the default gateway that will be provided to - clients. - - - - The hardware MAC address of a host (so that the DHCP server - can recognize a host when it makes a request). - - - - Specifies that the host should always be given the - same IP address. Note that using a hostname is - correct here, since the DHCP server will resolve the - hostname itself before returning the lease - information. - - - - Once you have finished writing your - dhcpd.conf, you can proceed to start the - server by issuing the following command: - - &prompt.root; /usr/local/etc/rc.d/isc-dhcpd.sh start - - Should you need to make changes to the configuration of your - server in the future, it is important to note that sending a - SIGHUP signal to - dhcpd does not - result in the configuration being reloaded, as it does with most - daemons. You will need to send a SIGTERM - signal to stop the process, and then restart it using the command - above. - - - - Files - - DHCP - configuration files - - - /usr/local/sbin/dhcpd - dhcpd is statically linked and - resides in /usr/local/sbin. The - &man.dhcpd.8; manual page installed with the - port gives more information about - dhcpd. - + + + Dynamic Host Configuration Protocol + (<acronym>DHCP</acronym>) - /usr/local/etc/dhcpd.conf - dhcpd requires a configuration - file, /usr/local/etc/dhcpd.conf before it - will start providing service to clients. This file needs to - contain all the information that should be provided to clients - that are being serviced, along with information regarding the - operation of the server. This configuration file is described - by the &man.dhcpd.conf.5; manual page installed - by the port. - + + Dynamic Host Configuration Protocol + DHCP + + + Internet Systems Consortium (ISC) + - /var/db/dhcpd.leases - The DHCP server keeps a database of leases it has issued - in this file, which is written as a log. The manual page - &man.dhcpd.leases.5;, installed by the port - gives a slightly longer description. - + The Dynamic Host Configuration Protocol + (DHCP) allows a system to connect to a + network in order to be assigned the necessary addressing + information for communication on that network. &os; includes + the OpenBSD version of dhclient which is used + by the client to obtain the addressing information. &os; does + not install a DHCP server, but several + servers are available in the &os; Ports Collection. The + DHCP protocol is fully described in RFC + 2131. + Informational resources are also available at isc.org/downloads/dhcp/. + + This section describes how to use the built-in + DHCP client. It then describes how to + install and configure a DHCP server. + + + In &os;, the &man.bpf.4; device is needed by both the + DHCP server and DHCP + client. 
This device is included in the + GENERIC kernel that is installed with + &os;. Users who prefer to create a custom kernel need to keep + this device if DHCP is used. + + It should be noted that bpf also + allows privileged users to run network packet sniffers on + that system. + + + + Configuring a <acronym>DHCP</acronym> Client + + DHCP client support is included in the + &os; installer, making it easy to configure a newly installed + system to automatically receive its networking addressing + information from an existing DHCP server. + Refer to for examples of + network configuration. + + UDP + When dhclient is executed on the client + machine, it begins broadcasting requests for configuration + information. By default, these requests use + UDP port 68. The server replies on + UDP port 67, giving the client an + IP address and other relevant network + information such as a subnet mask, default gateway, and + DNS server addresses. This information is + in the form of a DHCP + lease and is valid for a configurable time. + This allows stale IP addresses for clients + no longer connected to the network to automatically be reused. + DHCP clients can obtain a great deal of + information from the server. An exhaustive list may be found + in &man.dhcp-options.5;. + + By default, when a &os; system boots, its + DHCP client runs in the background, or + asynchronously. Other startup scripts + continue to run while the DHCP process + completes, which speeds up system startup. + + Background DHCP works well when the + DHCP server responds quickly to the + client's requests. However, DHCP may take + a long time to complete on some systems. If network services + attempt to run before DHCP has assigned the + network addressing information, they will fail. Using + DHCP in synchronous + mode prevents this problem as it pauses startup until the + DHCP configuration has completed. + + This line in /etc/rc.conf is used to + configure background or asynchronous mode: + + ifconfig_fxp0="DHCP" + + This line may already exist if the system was configured + to use DHCP during installation. Replace + the fxp0 shown in these examples + with the name of the interface to be dynamically configured, + as described in . + + To instead configure the system to use synchronous mode, + and to pause during startup while DHCP + completes, use + SYNCDHCP: + + ifconfig_fxp0="SYNCDHCP" + + Additional client options are available. Search for + dhclient in &man.rc.conf.5; for + details. - /usr/local/sbin/dhcrelay - dhcrelay is used in advanced - environments where one DHCP server forwards a request from a - client to another DHCP server on a separate network. If you - require this functionality, then install the net/isc-dhcp3-relay port. The - &man.dhcrelay.8; manual page provided with the - port contains more detail. - - - + + DHCP + configuration files + - + The DHCP client uses the following + files: - + + + /etc/dhclient.conf - - Domain Name System (DNS) - - ChernLeeContributed by - - - + The configuration file used by + dhclient. Typically, this file + contains only comments as the defaults are suitable for + most clients. This configuration file is described in + &man.dhclient.conf.5;. + - - Overview - BIND + + /sbin/dhclient + + More information about the command itself can + be found in &man.dhclient.8;. + + + + /sbin/dhclient-script + + The + &os;-specific DHCP client configuration + script. It is described in &man.dhclient-script.8;, but + should not need any user modification to function + properly. 
+ + + + /var/db/dhclient.leases.interface - FreeBSD utilizes, by default, a version of BIND (Berkeley - Internet Name Domain), which is the most common implementation - of the DNS protocol. DNS is the protocol through which names - are mapped to IP addresses, and vice versa. For example, a - query for www.FreeBSD.org will - receive a reply with the IP address of The FreeBSD Project's - web server, whereas, a query for ftp.FreeBSD.org will return the IP - address of the corresponding FTP machine. Likewise, the - opposite can happen. A query for an IP address can resolve - its hostname. It is not necessary to run a name server to - perform DNS lookups on a system. - - - DNS - DNS is coordinated across the Internet through a somewhat - complex system of authoritative root name servers, and other - smaller-scale name servers who host and cache individual domain - information. - - - - This document refers to BIND 8.x, as it is the stable version - used in &os;. Versions of &os; 5.3 and beyond include - BIND9 and the configuration instructions - may be found later in this chapter. Users of &os; 5.2 - and other previous versions may install BIND9 - from the net/bind9 port. - - - RFC1034 and RFC1035 dictate the DNS protocol. - - - - Currently, BIND is maintained by the - Internet Software Consortium http://www.isc.org/. - + The DHCP client keeps a database of + valid leases in this file, which is written as a log and + is described in &man.dhclient.leases.5;. + + - - Terminology + + Installing and Configuring a <acronym>DHCP</acronym> + Server - To understand this document, some terms related to DNS must be - understood. + This section demonstrates how to configure a &os; system + to act as a DHCP server using the Internet + Systems Consortium (ISC) implementation of + the DHCP server. This implementation and + its documentation can be installed using the + net/isc-dhcp42-server package or + port. - resolver - reverse DNS - root zone - - - - + + DHCP + server + - - - Term - Definition - - + + DHCP + installation + - - - Forward DNS - Mapping of hostnames to IP addresses - + The installation of + net/isc-dhcp42-server installs a sample + configuration file. Copy + /usr/local/etc/dhcpd.conf.example to + /usr/local/etc/dhcpd.conf and make any + edits to this new file. - - Origin - Refers to the domain covered in a particular zone - file - + + DHCP + dhcpd.conf + + The configuration file is comprised of declarations for + subnets and hosts which define the information that is + provided to DHCP clients. For example, + these lines configure the following: - - named, BIND, name server - Common names for the BIND name server package within - FreeBSD - + option domain-name "example.org"; +option domain-name-servers ns1.example.org; +option subnet-mask 255.255.255.0; - - Resolver - A system process through which a - machine queries a name server for zone information - +default-lease-time 600; +max-lease-time 72400; +ddns-update-style none; - - Reverse DNS - The opposite of forward DNS; mapping of IP addresses to - hostnames - +subnet 10.254.239.0 netmask 255.255.255.224 { + range 10.254.239.10 10.254.239.20; + option routers rtr-239-0-1.example.org, rtr-239-0-2.example.org; +} - - Root zone +host fantasia { + hardware ethernet 08:00:07:26:c0:a5; + fixed-address fantasia.fugue.com; +} - The beginning of the Internet zone hierarchy. - All zones fall under the root zone, similar to how - all files in a file system fall under the root directory. 
- + + + This option specifies the default search domain that + will be provided to clients. Refer to + &man.resolv.conf.5; for more information. + - - Zone - An individual domain, subdomain, or portion of the DNS administered by - the same authority - - - - + + This option specifies a comma separated list of + DNS servers that the client should use. + They can be listed by their Fully Qualified Domain Names + (FQDN), as seen in the example, or by + their IP addresses. + + + + The subnet mask that will be provided to + clients. + + + + The default lease expiry time in seconds. A client + can be configured to override this value. + + + + The maximum allowed length of time, in seconds, for a + lease. Should a client request a longer lease, a lease + will still be issued, but it will only be valid for + max-lease-time. + + + + The default of disables dynamic + DNS updates. Changing this to + configures the DHCP server to update a + DNS server whenever it hands out a + lease so that the DNS server knows + which IP addresses are associated with + which computers in the network. Do not change the default + setting unless the DNS server has been + configured to support dynamic + DNS. + + + + This line creates a pool of available + IP addresses which are reserved for + allocation to DHCP clients. The range + of addresses must be valid for the network or subnet + specified in the previous line. + + + + Declares the default gateway that is valid for the + network or subnet specified before the opening + { bracket. + + + + Specifies the hardware MAC address + of a client so that the DHCP server can + recognize the client when it makes a request. + + + + Specifies that this host should always be given the + same IP address. Using the hostname is + correct, since the DHCP server will + resolve the hostname before returning the lease + information. + + + + This configuration file supports many more options. Refer + to dhcpd.conf(5), installed with the server, for details and + examples. + + Once the configuration of dhcpd.conf + is complete, enable the DHCP server in + /etc/rc.conf: + + dhcpd_enable="YES" +dhcpd_ifaces="dc0" + + Replace the dc0 with the interface (or + interfaces, separated by whitespace) that the + DHCP server should listen on for + DHCP client requests. + + Start the server by issuing the following command: + + &prompt.root; service isc-dhcpd start + + Any future changes to the configuration of the server will + require the dhcpd service to be + stopped and then started using &man.service.8;. + + The DHCP server uses the following + files. Note that the manual pages are installed with the + server software. - zones - examples + DHCP + configuration files - - Examples of zones: - - - . is the root zone - - - org. is a zone under the root zone - - - example.org. is a - zone under the org. zone - - - foo.example.org. is - a subdomain, a zone under the example.org. zone - - - - 1.2.3.in-addr.arpa is a zone referencing - all IP addresses which fall under the 3.2.1.* IP space. - - - + + /usr/local/sbin/dhcpd - As one can see, the more specific part of a hostname - appears to its left. For example, example.org. is more specific than - org., as org. is more - specific than the root zone. The layout of each part of a - hostname is much like a file system: the - /dev directory falls within the root, and - so on. + More information about the + dhcpd server can be found in + dhcpd(8). 
+ + + + /usr/local/etc/dhcpd.conf + The server configuration file needs to contain all the + information that should be provided to clients, along with + information regarding the operation of the server. This + configuration file is described in dhcpd.conf(5). + + + + /var/db/dhcpd.leases + The DHCP server keeps a database of + leases it has issued in this file, which is written as a + log. Refer to dhcpd.leases(5), which gives a slightly + longer description. + + + + /usr/local/sbin/dhcrelay + + This daemon is used in advanced environments where one + DHCP server forwards a request from a + client to another DHCP server on a + separate network. If this functionality is required, + install the net/isc-dhcp42-relay + package or port. The installation includes dhcrelay(8) + which provides more detail. + + +
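    As a quick check that the server configured above is running
	and issuing leases, the rc status target and the lease database
	described in this section can be consulted.  This is only an
	illustrative sketch using the isc-dhcpd service
	name and the /var/db/dhcpd.leases path from
	the file list above; adjust if the local setup differs:

	&prompt.root; service isc-dhcpd status
&prompt.root; tail /var/db/dhcpd.leases

	Each issued lease appears in the file as a
	lease block recording the client address and
	the start and end times of the lease.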
+ + + + Domain Name System (<acronym>DNS</acronym>) + + DNS + + Domain Name System (DNS) is the protocol + through which domain names are mapped to IP + addresses, and vice versa. DNS is + coordinated across the Internet through a somewhat complex + system of authoritative root, Top Level Domain + (TLD), and other smaller-scale name servers, + which host and cache individual domain information. It is not + necessary to run a name server to perform + DNS lookups on a system. + + BIND + + In &os; 10, the Berkeley Internet Name Domain + (BIND) has been removed from the base system + and replaced with Unbound. Unbound as configured in the &os; + Base is a local caching resolver. BIND is + still available from The Ports Collection as dns/bind99 or dns/bind98. In &os; 9 and lower, + BIND is included in &os; Base. The &os; + version provides enhanced security features, a new file system + layout, and automated &man.chroot.8; configuration. + BIND is maintained by the Internet Systems + Consortium. + + resolver + reverse + DNS + root zone + + The following table describes some of the terms associated + with DNS: + + + <acronym>DNS</acronym> Terminology + + + + + + + + Term + Definition + + + + + + Forward DNS + Mapping of hostnames to IP + addresses. + + + + Origin + Refers to the domain covered in a particular zone + file. + + + + named, BIND + Common names for the BIND name server package + within &os;. + + + + Resolver + A system process through which a machine queries + a name server for zone information. + + + + Reverse DNS + Mapping of IP addresses to + hostnames. + + + + Root zone + + The beginning of the Internet zone hierarchy. All + zones fall under the root zone, similar to how all files + in a file system fall under the root directory. + + + + Zone + An individual domain, subdomain, or portion of the + DNS administered by the same + authority. + + + +
+ + + zones + examples + + + Examples of zones: + + + + . is how the root zone is + usually referred to in documentation. + + + + org. is a Top Level Domain + (TLD) under the root zone. + + + + example.org. is a zone + under the org. + TLD. + + + + 1.168.192.in-addr.arpa is a + zone referencing all IP addresses which + fall under the 192.168.1.* + IP address space. + + + + As one can see, the more specific part of a hostname + appears to its left. For example, example.org. is more + specific than org., as + org. is more specific than the root + zone. The layout of each part of a hostname is much like a file + system: the /dev directory falls within the + root, and so on. Reasons to Run a Name Server - Name servers usually come in two forms: an authoritative - name server, and a caching name server. + Name servers generally come in two forms: authoritative + name servers, and caching (also known as resolving) name + servers. An authoritative name server is needed when: - one wants to serve DNS information to the - world, replying authoritatively to queries. + One wants to serve DNS information + to the world, replying authoritatively to queries. + - a domain, such as example.org, is - registered and IP addresses need to be assigned to hostnames - under it. + A domain, such as example.org, is + registered and IP addresses need to be + assigned to hostnames under it. + - an IP address block requires reverse DNS entries (IP to - hostname). + An IP address block requires + reverse DNS entries + (IP to hostname). + - a backup name server, called a slave, must reply to queries - when the primary is down or inaccessible. - + A backup or second name server, called a slave, will + reply to queries. + A caching name server is needed when: - a local DNS server may cache and respond more quickly - than querying an outside name server. - - - a reduction in overall network traffic is desired (DNS - traffic has been measured to account for 5% or more of total - Internet traffic). + A local DNS server may cache and + respond more quickly than querying an outside name + server. - When one queries for www.FreeBSD.org, the resolver usually - queries the uplink ISP's name server, and retrieves the reply. - With a local, caching DNS server, the query only has to be - made once to the outside world by the caching DNS server. - Every additional query will not have to look to the outside of - the local network, since the information is cached - locally. + When one queries for www.FreeBSD.org, the + resolver usually queries the uplink ISP's + name server, and retrieves the reply. With a local, caching + DNS server, the query only has to be made + once to the outside world by the caching + DNS server. Additional queries will not + have to go outside the local network, since the information is + cached locally. + + + + <acronym>DNS</acronym> Server Configuration in &os; 10.0 + and Later + + In &os; 10.0, BIND has been + replaced with Unbound. + Unbound is a validating caching + resolver only. If an authoritative server is needed, many are + available from the Ports Collection. + + Unbound is provided in the &os; + base system. By default, it will provide + DNS resolution to the local machine only. + While the base system package can be configured to provide + resolution services beyond the local machine, it is + recommended that such requirements be addressed by installing + Unbound from the &os; Ports + Collection. 
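      Where resolution services for other hosts are required, a
	minimal sketch of installing the full
	Unbound from the Ports Collection
	looks like this (the port normally lives at
	dns/unbound; the package can be used instead
	of building):

	&prompt.root; cd /usr/ports/dns/unbound
&prompt.root; make install clean

	or, using packages:

	&prompt.root; pkg install unbound

	The port installs its own rc script, typically enabled with
	unbound_enable="YES" in
	/etc/rc.conf, which is separate from the
	local_unbound_enable variable used by the
	base system resolver shown below.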
+ + To enable Unbound, add the + following to /etc/rc.conf: + + local_unbound_enable="YES" + + Any existing nameservers in + /etc/resolv.conf will be configured as + forwarders in the new Unbound + configuration. + + + If any of the listed nameservers do not support + DNSSEC, local DNS + resolution will fail. Be sure to test each nameserver and + remove any that fail the test. The following command will + show the trust tree or a failure for a nameserver running on + 192.168.1.1: + + &prompt.user; drill -S FreeBSD.org @192.168.1.1 + + Once each nameserver is confirmed to support + DNSSEC, start + Unbound: + + &prompt.root; service local_unbound onestart + + This will take care of updating + /etc/resolv.conf so that queries for + DNSSEC secured domains will now work. For + example, run the following to validate the FreeBSD.org + DNSSEC trust tree: + + &prompt.user; drill -S FreeBSD.org +;; Number of trusted keys: 1 +;; Chasing: freebsd.org. A + +DNSSEC Trust tree: +freebsd.org. (A) +|---freebsd.org. (DNSKEY keytag: 36786 alg: 8 flags: 256) + |---freebsd.org. (DNSKEY keytag: 32659 alg: 8 flags: 257) + |---freebsd.org. (DS keytag: 32659 digest type: 2) + |---org. (DNSKEY keytag: 49587 alg: 7 flags: 256) + |---org. (DNSKEY keytag: 9795 alg: 7 flags: 257) + |---org. (DNSKEY keytag: 21366 alg: 7 flags: 257) + |---org. (DS keytag: 21366 digest type: 1) + | |---. (DNSKEY keytag: 40926 alg: 8 flags: 256) + | |---. (DNSKEY keytag: 19036 alg: 8 flags: 257) + |---org. (DS keytag: 21366 digest type: 2) + |---. (DNSKEY keytag: 40926 alg: 8 flags: 256) + |---. (DNSKEY keytag: 19036 alg: 8 flags: 257) +;; Chase successful - How It Works - In FreeBSD, the BIND daemon is called - named for obvious reasons. + DNS Server Configuration in &os; + 9.<replaceable>X</replaceable> and Earlier + + In &os;, the BIND daemon is called + named. @@ -3102,296 +3248,499 @@ - named - the BIND daemon + &man.named.8; + The BIND daemon. - ndc - name daemon control program + &man.rndc.8; + Name server control utility. /etc/namedb - directory where BIND zone information resides + Directory where BIND zone information + resides. /etc/namedb/named.conf - daemon configuration file + Configuration file of the daemon. - - Zone files are usually contained within the - /etc/namedb - directory, and contain the DNS zone information - served by the name server. - - + Depending on how a given zone is configured on the server, + the files related to that zone can be found in the + master, + slave, or + dynamic subdirectories + of the /etc/namedb + directory. These files contain the DNS + information that will be given out by the name server in + response to queries. - + Starting BIND + - BIND + BIND starting - - Since BIND is installed by default, configuring it all is - relatively simple. - - - To ensure the named daemon is - started at boot, put the following line in - /etc/rc.conf: - + + Since BIND is installed by default, configuring it is + relatively simple. + + The default named configuration + is that of a basic resolving name server, running in a + &man.chroot.8; environment, and restricted to listening on the + local IPv4 loopback address (127.0.0.1). 
To start the server + one time with this configuration, use the following + command: + + &prompt.root; service named onestart + + To ensure the named daemon is + started at boot each time, put the following line into the + /etc/rc.conf: + named_enable="YES" - To start the daemon manually (after configuring it): - &prompt.root; ndc start - - + There are many configuration options for + /etc/namedb/named.conf that are beyond + the scope of this document. Other startup options + for named on &os; can be found in + the named_* + flags in /etc/defaults/rc.conf and in + &man.rc.conf.5;. The + section is also a good + read. + + + Configuration Files + - BIND + BIND configuration files - - Using <command>make-localhost</command> - Be sure to: - - &prompt.root; cd /etc/namedb -&prompt.root; sh make-localhost - to properly create the local reverse DNS zone file in - /etc/namedb/master/localhost.rev. - - - - <filename>/etc/namedb/named.conf</filename> + Configuration files for named + currently reside in + /etc/namedb directory + and will need modification before use unless all that is + needed is a simple resolver. This is where most of the + configuration will be performed. + + + <filename>/etc/namedb/named.conf</filename> - // $FreeBSD$ + // $FreeBSD$ // -// Refer to the named(8) manual page for details. If you are ever going -// to setup a primary server, make sure you've understood the hairy -// details of how DNS is working. Even with simple mistakes, you can -// break connectivity for affected parties, or cause huge amount of -// useless Internet traffic. +// Refer to the named.conf(5) and named(8) man pages, and the documentation +// in /usr/share/doc/bind9 for more details. +// +// If you are going to set up an authoritative server, make sure you +// understand the hairy details of how DNS works. Even with +// simple mistakes, you can break connectivity for affected parties, +// or cause huge amounts of useless Internet traffic. options { - directory "/etc/namedb"; - -// In addition to the "forwarders" clause, you can force your name -// server to never initiate queries of its own, but always ask its -// forwarders only, by enabling the following line: -// -// forward only; + // All file and path names are relative to the chroot directory, + // if any, and should be fully qualified. + directory "/etc/namedb/working"; + pid-file "/var/run/named/pid"; + dump-file "/var/dump/named_dump.db"; + statistics-file "/var/stats/named.stats"; + +// If named is being used only as a local resolver, this is a safe default. +// For named to be accessible to the network, comment this option, specify +// the proper IP address, or delete this option. + listen-on { 127.0.0.1; }; + +// If you have IPv6 enabled on this system, uncomment this option for +// use as a local resolver. To give access to the network, specify +// an IPv6 address, or the keyword "any". +// listen-on-v6 { ::1; }; + +// These zones are already covered by the empty zones listed below. +// If you remove the related empty zones below, comment these lines out. + disable-empty-zone "255.255.255.255.IN-ADDR.ARPA"; + disable-empty-zone "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.IP6.ARPA"; + disable-empty-zone "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.IP6.ARPA"; // If you've got a DNS server around at your upstream provider, enter // its IP address here, and enable the line below. This will make you -// benefit from its cache, thus reduce overall DNS traffic in the -Internet. 
+// benefit from its cache, thus reduce overall DNS traffic in the Internet. /* - forwarders { - 127.0.0.1; - }; -*/ + forwarders { + 127.0.0.1; + }; +*/ - - Just as the comment says, to benefit from an uplink's cache, - forwarders can be enabled here. Under normal - circumstances, a name server will recursively query the Internet - looking at certain name servers until it finds the answer it is - looking for. Having this enabled will have it query the uplink's - name server (or name server provided) first, taking advantage of - its cache. If the uplink name server in question is a heavily - trafficked, fast name server, enabling this may be worthwhile. - - - 127.0.0.1 - will not work here. - Change this IP address to a name server at your uplink. - - - /* - * If there is a firewall between you and name servers you want - * to talk to, you might need to uncomment the query-source - * directive below. Previous versions of BIND always asked - * questions using port 53, but BIND 8.1 uses an unprivileged - * port by default. - */ - // query-source address * port 53; - - /* - * If running in a sandbox, you may have to specify a different - * location for the dumpfile. - */ - // dump-file "s/named_dump.db"; -}; +// If the 'forwarders' clause is not empty the default is to 'forward first' +// which will fall back to sending a query from your local server if the name +// servers in 'forwarders' do not have the answer. Alternatively you can +// force your name server to never initiate queries of its own by enabling the +// following line: +// forward only; + +// If you wish to have forwarding configured automatically based on +// the entries in /etc/resolv.conf, uncomment the following line and +// set named_auto_forward=yes in /etc/rc.conf. You can also enable +// named_auto_forward_only (the effect of which is described above). +// include "/etc/namedb/auto_forward.conf"; + + Just as the comment says, to benefit from an uplink's + cache, forwarders can be enabled here. + Under normal circumstances, a name server will recursively + query the Internet looking at certain name servers until it + finds the answer it is looking for. Having this enabled + will have it query the uplink's name server (or name server + provided) first, taking advantage of its cache. If the + uplink name server in question is a heavily trafficked, fast + name server, enabling this may be worthwhile. -// Note: the following will be supported in a future release. -/* -host { any; } { - topology { - 127.0.0.0/8; - }; + + 127.0.0.1 + will not work here. Change this + IP address to a name server at the + uplink. + + + /* + Modern versions of BIND use a random UDP port for each outgoing + query by default in order to dramatically reduce the possibility + of cache poisoning. All users are strongly encouraged to utilize + this feature, and to configure their firewalls to accommodate it. + + AS A LAST RESORT in order to get around a restrictive firewall + policy you can try enabling the option below. Use of this option + will significantly reduce your ability to withstand cache poisoning + attacks, and should be avoided if at all possible. + + Replace NNNNN in the example with a number between 49160 and 65530. + */ + // query-source address * port NNNNN; }; -*/ -// Setting up secondaries is way easier and the rough picture for this -// is explained below. -// // If you enable a local name server, don't forget to enter 127.0.0.1 -// into your /etc/resolv.conf so this server will be queried first. 
+// first in your /etc/resolv.conf so this server will be queried. // Also, make sure to enable it in /etc/rc.conf. +// The traditional root hints mechanism. Use this, OR the slave zones below. +zone "." { type hint; file "/etc/namedb/named.root"; }; + +/* Slaving the following zones from the root name servers has some + significant advantages: + 1. Faster local resolution for your users + 2. No spurious traffic will be sent from your network to the roots + 3. Greater resilience to any potential root server failure/DDoS + + On the other hand, this method requires more monitoring than the + hints file to be sure that an unexpected failure mode has not + incapacitated your server. Name servers that are serving a lot + of clients will benefit more from this approach than individual + hosts. Use with caution. + + To use this mechanism, uncomment the entries below, and comment + the hint zone above. + + As documented at http://dns.icann.org/services/axfr/ these zones: + "." (the root), ARPA, IN-ADDR.ARPA, IP6.ARPA, and ROOT-SERVERS.NET + are available for AXFR from these servers on IPv4 and IPv6: + xfr.lax.dns.icann.org, xfr.cjr.dns.icann.org +*/ +/* zone "." { - type hint; - file "named.root"; + type slave; + file "/etc/namedb/slave/root.slave"; + masters { + 192.5.5.241; // F.ROOT-SERVERS.NET. + }; + notify no; }; - -zone "0.0.127.IN-ADDR.ARPA" { - type master; - file "localhost.rev"; +zone "arpa" { + type slave; + file "/etc/namedb/slave/arpa.slave"; + masters { + 192.5.5.241; // F.ROOT-SERVERS.NET. + }; + notify no; }; +*/ + +/* Serving the following zones locally will prevent any queries + for these zones leaving your network and going to the root + name servers. This has two significant advantages: + 1. Faster local resolution for your users + 2. No spurious traffic will be sent from your network to the roots +*/ +// RFCs 1912 and 5735 (and BCP 32 for localhost) +zone "localhost" { type master; file "/etc/namedb/master/localhost-forward.db"; }; +zone "127.in-addr.arpa" { type master; file "/etc/namedb/master/localhost-reverse.db"; }; +zone "255.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// RFC 1912-style zone for IPv6 localhost address +zone "0.ip6.arpa" { type master; file "/etc/namedb/master/localhost-reverse.db"; }; + +// "This" Network (RFCs 1912 and 5735) +zone "0.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// Private Use Networks (RFCs 1918 and 5735) +zone "10.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "16.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "17.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "18.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "19.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "20.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "21.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "22.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "23.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "24.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "25.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "26.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "27.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "28.172.in-addr.arpa" { type 
master; file "/etc/namedb/master/empty.db"; }; +zone "29.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "30.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "31.172.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "168.192.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// Link-local/APIPA (RFCs 3927 and 5735) +zone "254.169.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// IETF protocol assignments (RFCs 5735 and 5736) +zone "0.0.192.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// TEST-NET-[1-3] for Documentation (RFCs 5735 and 5737) +zone "2.0.192.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "100.51.198.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "113.0.203.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// IPv6 Range for Documentation (RFC 3849) +zone "8.b.d.0.1.0.0.2.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// Domain Names for Documentation and Testing (BCP 32) +zone "test" { type master; file "/etc/namedb/master/empty.db"; }; +zone "example" { type master; file "/etc/namedb/master/empty.db"; }; +zone "invalid" { type master; file "/etc/namedb/master/empty.db"; }; +zone "example.com" { type master; file "/etc/namedb/master/empty.db"; }; +zone "example.net" { type master; file "/etc/namedb/master/empty.db"; }; +zone "example.org" { type master; file "/etc/namedb/master/empty.db"; }; + +// Router Benchmark Testing (RFCs 2544 and 5735) +zone "18.198.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "19.198.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// IANA Reserved - Old Class E Space (RFC 5735) +zone "240.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "241.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "242.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "243.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "244.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "245.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "246.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "247.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "248.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "249.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "250.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "251.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "252.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "253.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "254.in-addr.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// IPv6 Unassigned Addresses (RFC 4291) +zone "1.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "3.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "4.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "5.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "6.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "7.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "8.ip6.arpa" { type master; file 
"/etc/namedb/master/empty.db"; }; +zone "9.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "a.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "b.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "c.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "d.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "e.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "0.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "1.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "2.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "3.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "4.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "5.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "6.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "7.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "8.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "9.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "a.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "b.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "0.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "1.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "2.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "3.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "4.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "5.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "6.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "7.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// IPv6 ULA (RFC 4193) +zone "c.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "d.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// IPv6 Link Local (RFC 4291) +zone "8.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "9.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "a.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "b.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// IPv6 Deprecated Site-Local Addresses (RFC 3879) +zone "c.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "d.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "e.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; +zone "f.e.f.ip6.arpa" { type master; file "/etc/namedb/master/empty.db"; }; + +// IP6.INT is Deprecated (RFC 4159) +zone "ip6.int" { type master; file "/etc/namedb/master/empty.db"; }; // NB: Do not use the IP addresses below, they are faked, and only // serve demonstration/documentation purposes! // -// Example secondary config entries. It can be convenient to become -// a secondary at least for the zone where your own domain is in. Ask +// Example slave zone config entries. It can be convenient to become +// a slave at least for the zone your own domain is in. Ask // your network administrator for the IP address of the responsible -// primary. +// master name server. // -// Never forget to include the reverse lookup (IN-ADDR.ARPA) zone! 
-// (This is the first bytes of the respective IP address, in reverse -// order, with ".IN-ADDR.ARPA" appended.) +// Do not forget to include the reverse lookup zone! +// This is named after the first bytes of the IP address, in reverse +// order, with ".IN-ADDR.ARPA" appended, or ".IP6.ARPA" for IPv6. // -// Before starting to setup a primary zone, better make sure you fully -// understand how DNS and BIND works, however. There are sometimes -// unobvious pitfalls. Setting up a secondary is comparably simpler. +// Before starting to set up a master zone, make sure you fully +// understand how DNS and BIND work. There are sometimes +// non-obvious pitfalls. Setting up a slave zone is usually simpler. // // NB: Don't blindly enable the examples below. :-) Use actual names // and addresses instead. -// -// NOTE!!! FreeBSD runs BIND in a sandbox (see named_flags in rc.conf). -// The directory containing the secondary zones must be write accessible -// to BIND. The following sequence is suggested: -// -// mkdir /etc/namedb/s -// chown bind:bind /etc/namedb/s -// chmod 750 /etc/namedb/s - - For more information on running BIND in a sandbox, see - Running named in a sandbox. - - - /* -zone "example.com" { - type slave; - file "s/example.com.bak"; - masters { - 192.168.1.1; - }; + +/* An example dynamic zone +key "exampleorgkey" { + algorithm hmac-md5; + secret "sf87HJqjkqh8ac87a02lla=="; +}; +zone "example.org" { + type master; + allow-update { + key "exampleorgkey"; + }; + file "/etc/namedb/dynamic/example.org"; }; +*/ -zone "0.168.192.in-addr.arpa" { - type slave; - file "s/0.168.192.in-addr.arpa.bak"; - masters { - 192.168.1.1; - }; +/* Example of a slave reverse zone +zone "1.168.192.in-addr.arpa" { + type slave; + file "/etc/namedb/slave/1.168.192.in-addr.arpa"; + masters { + 192.168.1.1; + }; }; */ - In named.conf, these are examples of slave - entries for a forward and reverse zone. - For each new zone served, a new zone entry must be added to - named.conf. + In named.conf, these are examples + of slave entries for a forward and reverse zone. + + For each new zone served, a new zone entry must be added + to named.conf. - For example, the simplest zone entry for - example.org can look like: + For example, the simplest zone entry for + example.org + can look like: - zone "example.org" { + zone "example.org" { type master; - file "example.org"; + file "master/example.org"; }; - The zone is a master, as indicated by the - statement, holding its zone information in - /etc/namedb/example.org indicated by - the statement. + The zone is a master, as indicated by the + statement, holding its zone + information in + /etc/namedb/master/example.org + indicated by the statement. - zone "example.org" { + zone "example.org" { type slave; - file "example.org"; + file "slave/example.org"; }; - In the slave case, the zone information is transferred from - the master name server for the particular zone, and saved in the - file specified. If and when the master server dies or is - unreachable, the slave name server will have the transferred - zone information and will be able to serve it. - + In the slave case, the zone information is transferred + from the master name server for the particular zone, and + saved in the file specified. If and when the master server + dies or is unreachable, the slave name server will have the + transferred zone information and will be able to serve + it. 
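      Whichever way a zone is added, it is worth verifying the
	configuration before the running name server picks it up.  A
	minimal, illustrative sketch using the checking utilities
	shipped with BIND and the &man.rndc.8;
	control utility listed earlier, with the file layout used in
	this chapter:

	&prompt.root; named-checkconf /etc/namedb/named.conf
&prompt.root; named-checkzone example.org /etc/namedb/master/example.org
&prompt.root; rndc reload

	named-checkconf parses the configuration
	file, named-checkzone loads a single zone
	file and reports syntax errors, and rndc
	reload asks the running
	named to re-read its configuration and
	zones.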
+ - - Zone Files - - An example master zone file for example.org (existing within - /etc/namedb/example.org) is as follows: - + + Zone Files - $TTL 3600 + + BIND + zone files + -example.org. IN SOA ns1.example.org. admin.example.org. ( - 5 ; Serial - 10800 ; Refresh - 3600 ; Retry - 604800 ; Expire - 86400 ) ; Minimum TTL + An example master zone file for example.org (existing + within /etc/namedb/master/example.org) + is as follows: + + $TTL 3600 ; 1 hour default TTL +example.org. IN SOA ns1.example.org. admin.example.org. ( + 2006051501 ; Serial + 10800 ; Refresh + 3600 ; Retry + 604800 ; Expire + 300 ; Negative Response TTL + ) ; DNS Servers -@ IN NS ns1.example.org. -@ IN NS ns2.example.org. + IN NS ns1.example.org. + IN NS ns2.example.org. + +; MX Records + IN MX 10 mx.example.org. + IN MX 20 mail.example.org. + + IN A 192.168.1.1 ; Machine Names -localhost IN A 127.0.0.1 -ns1 IN A 3.2.1.2 -ns2 IN A 3.2.1.3 -mail IN A 3.2.1.10 -@ IN A 3.2.1.30 +localhost IN A 127.0.0.1 +ns1 IN A 192.168.1.2 +ns2 IN A 192.168.1.3 +mx IN A 192.168.1.4 +mail IN A 192.168.1.5 ; Aliases -www IN CNAME @ +www IN CNAME example.org. + + Note that every hostname ending in a . is + an exact hostname, whereas everything without a trailing + . is relative to the origin. For example, + ns1 is translated into + ns1.example.org. -; MX Record -@ IN MX 10 mail.example.org. + The format of a zone file follows: - - Note that every hostname ending in a . is an - exact hostname, whereas everything without a trailing - . is referenced to the origin. For example, - www is translated into - www.origin. - In our fictitious zone file, our origin is - example.org., so www - would translate to www.example.org. - - - - The format of a zone file follows: - - recordname IN recordtype value + recordname IN recordtype value - DNS + DNS records - - The most commonly used DNS records: - + + The most commonly used DNS + records: @@ -3403,8 +3752,9 @@ NS - an authoritative name server - + + an authoritative name server + A @@ -3415,7 +3765,8 @@ CNAME - the canonical name for an alias + the canonical name for an + alias @@ -3427,903 +3778,858 @@ PTR - a domain name pointer (used in reverse DNS) - + + a domain name pointer (used in reverse + DNS) + - -example.org. IN SOA ns1.example.org. admin.example.org. ( - 5 ; Serial + example.org. IN SOA ns1.example.org. admin.example.org. ( + 2006051501 ; Serial 10800 ; Refresh after 3 hours 3600 ; Retry after 1 hour 604800 ; Expire after 1 week - 86400 ) ; Minimum TTL of 1 day - - + 300 ) ; Negative Response TTL - example.org. + example.org. - the domain name, also the origin for this - zone file. + + the domain name, also the origin for this + zone file. + - ns1.example.org. + ns1.example.org. - the primary/authoritative name server for this - zone. + + the primary/authoritative name server for this + zone. + admin.example.org. - the responsible person for this zone, + + the responsible person for this zone, email address with @ - replaced. (admin@example.org becomes + replaced. (admin@example.org becomes admin.example.org) - 5 + 2006051501 - the serial number of the file. This - must be incremented each time the zone file is - modified. Nowadays, many admins prefer a - yyyymmddrr format for the serial - number. 2001041002 would mean - last modified 04/10/2001, the latter - 02 being the second time the zone - file has been modified this day. The serial number - is important as it alerts slave name servers for a - zone when it is updated. - + + the serial number of the file. 
This must be + incremented each time the zone file is modified. + Nowadays, many admins prefer a + yyyymmddrr format for the serial + number. 2006051501 would mean last + modified 05/15/2006, the latter 01 + being the first time the zone file has been modified + this day. The serial number is important as it alerts + slave name servers for a zone when it is + updated. + - -@ IN NS ns1.example.org. + IN NS ns1.example.org. - - This is an NS entry. Every name server that is going to reply - authoritatively for the zone must have one of these entries. - The @ as seen here could have been - example.org. - The @ translates to the origin. - - - -localhost IN A 127.0.0.1 -ns1 IN A 3.2.1.2 -ns2 IN A 3.2.1.3 -mail IN A 3.2.1.10 -@ IN A 3.2.1.30 - - - The A record indicates machine names. As seen above, - ns1.example.org would resolve - to 3.2.1.2. Again, the - origin symbol, @, is used here, thus - meaning example.org would - resolve to 3.2.1.30. - - - -www IN CNAME @ - - - The canonical name record is usually used for giving aliases - to a machine. In the example, www is - aliased to the machine addressed to the origin, or - example.org - (3.2.1.30). - CNAMEs can be used to provide alias - hostnames, or round robin one hostname among multiple - machines. - + This is an NS entry. Every name server that is going to + reply authoritatively for the zone must have one of these + entries. + + localhost IN A 127.0.0.1 +ns1 IN A 192.168.1.2 +ns2 IN A 192.168.1.3 +mx IN A 192.168.1.4 +mail IN A 192.168.1.5 + + The A record indicates machine names. As seen above, + ns1.example.org would + resolve to 192.168.1.2. + + IN A 192.168.1.1 + + This line assigns IP address + 192.168.1.1 to + the current origin, in this case example.org. + + www IN CNAME @ + + The canonical name record is usually used for giving + aliases to a machine. In the example, + www is aliased to the + master machine whose name happens to be the + same as the domain name example.org + (192.168.1.1). + CNAMEs can never be used together with another kind of + record for the same hostname. MX record - -@ IN MX 10 mail.example.org. + IN MX 10 mail.example.org. - - The MX record indicates which mail - servers are responsible for handling incoming mail for the - zone. mail.example.org is the - hostname of the mail server, and 10 being the priority of - that mail server. - - - - One can have several mail servers, with priorities of 3, 2, - 1. A mail server attempting to deliver to example.org would first try the - highest priority MX, then the second highest, etc, until the - mail can be properly delivered. - - - - For in-addr.arpa zone files (reverse DNS), the same format is - used, except with PTR entries instead of - A or CNAME. - + The MX record indicates which mail servers are + responsible for handling incoming mail for the zone. + mail.example.org is the + hostname of a mail server, and 10 is the priority of that + mail server. + + One can have several mail servers, with priorities of + 10, 20 and so on. A mail server attempting to deliver to + example.org + would first try the highest priority MX (the record with the + lowest priority number), then the second highest, etc, until + the mail can be properly delivered. + + For in-addr.arpa zone files (reverse + DNS), the same format is used, except + with PTR entries instead of A or CNAME. - $TTL 3600 + $TTL 3600 -1.2.3.in-addr.arpa. IN SOA ns1.example.org. admin.example.org. ( - 5 ; Serial +1.168.192.in-addr.arpa. IN SOA ns1.example.org. admin.example.org. 
( + 2006051501 ; Serial 10800 ; Refresh 3600 ; Retry 604800 ; Expire - 3600 ) ; Minimum - -@ IN NS ns1.example.org. -@ IN NS ns2.example.org. + 300 ) ; Negative Response TTL -2 IN PTR ns1.example.org. -3 IN PTR ns2.example.org. -10 IN PTR mail.example.org. -30 IN PTR example.org. + IN NS ns1.example.org. + IN NS ns2.example.org. - This file gives the proper IP address to hostname - mappings of our above fictitious domain. - - +1 IN PTR example.org. +2 IN PTR ns1.example.org. +3 IN PTR ns2.example.org. +4 IN PTR mx.example.org. +5 IN PTR mail.example.org. + + This file gives the proper IP address + to hostname mappings for the above fictitious domain. + + It is worth noting that all names on the right side + of a PTR record need to be fully qualified (i.e., end in + a .). + + - + Caching Name Server - - BIND - caching name server - - A caching name server is a name server that is not - authoritative for any zones. It simply asks queries of its - own, and remembers them for later use. To set one up, just - configure the name server as usual, omitting any inclusions of - zones. - - - - Running <application>named</application> in a Sandbox - BIND - running in a sandbox - - - - chroot - - For added security you may want to run &man.named.8; as an - unprivileged user, and configure it to &man.chroot.8; into a - sandbox directory. This makes everything outside of the - sandbox inaccessible to the named - daemon. Should named be - compromised, this will help to reduce the damage that can be - caused. By default, FreeBSD has a user and a group called - bind, intended for this use. - - Various people would recommend that instead of configuring - named to chroot, you - should run named inside a &man.jail.8;. - This section does not attempt to cover this situation. - - - Since named will not be able to - access anything outside of the sandbox (such as shared - libraries, log sockets, and so on), there are a number of steps - that need to be followed in order to allow - named to function correctly. In the - following checklist, it is assumed that the path to the sandbox - is /etc/namedb and that you have made no - prior modifications to the contents of this directory. Perform - the following steps as root: - - - - Create all directories that named - expects to see: - - &prompt.root; cd /etc/namedb -&prompt.root; mkdir -p bin dev etc var/tmp var/run master slave -&prompt.root; chown bind:bind slave var/* - - - - - - named only needs write access to - these directories, so that is all we give it. - - - - - - Rearrange and create basic zone and configuration files: - &prompt.root; cp /etc/localtime etc -&prompt.root; mv named.conf etc && ln -sf etc/named.conf -&prompt.root; mv named.root master - -&prompt.root; sh make-localhost -&prompt.root; cat > master/named.localhost -$ORIGIN localhost. -$TTL 6h -@ IN SOA localhost. postmaster.localhost. ( - 1 ; serial - 3600 ; refresh - 1800 ; retry - 604800 ; expiration - 3600 ) ; minimum - IN NS localhost. - IN A 127.0.0.1 -^D - - - - This allows named to log the - correct time to &man.syslogd.8;. 
- - - - - - - syslog - log files - named - - If you are running a version of &os; prior to 4.9-RELEASE, build a statically linked copy of - named-xfer, and copy it into the sandbox: - - &prompt.root; cd /usr/src/lib/libisc -&prompt.root; make cleandir && make cleandir && make depend && make all -&prompt.root; cd /usr/src/lib/libbind -&prompt.root; make cleandir && make cleandir && make depend && make all -&prompt.root; cd /usr/src/libexec/named-xfer -&prompt.root; make cleandir && make cleandir && make depend && make NOSHARED=yes all -&prompt.root; cp named-xfer /etc/namedb/bin && chmod 555 /etc/namedb/bin/named-xfer - - After your statically linked - named-xfer is installed some cleaning up - is required, to avoid leaving stale copies of libraries or - programs in your source tree: - - &prompt.root; cd /usr/src/lib/libisc -&prompt.root; make cleandir -&prompt.root; cd /usr/src/lib/libbind -&prompt.root; make cleandir -&prompt.root; cd /usr/src/libexec/named-xfer -&prompt.root; make cleandir - - - - This step has been reported to fail occasionally. If this - happens to you, then issue the command: - - &prompt.root; cd /usr/src && make cleandir && make cleandir - - and delete your /usr/obj tree: - - &prompt.root; rm -fr /usr/obj && mkdir /usr/obj - - This will clean out any cruft from your - source tree, and retrying the steps above should then work. - - - - If you are running &os; version 4.9-RELEASE or later, - then the copy of named-xfer in - /usr/libexec is statically linked by - default, and you can simply use &man.cp.1; to copy it into - your sandbox. - - - - Make a dev/null that - named can see and write to: - - &prompt.root; cd /etc/namedb/dev && mknod null c 2 2 -&prompt.root; chmod 666 null - - - - Symlink /var/run/ndc to - /etc/namedb/var/run/ndc: - - &prompt.root; ln -sf /etc/namedb/var/run/ndc /var/run/ndc - - - This simply avoids having to specify the - option to &man.ndc.8; every time you - run it. Since the contents of - /var/run are deleted on boot, it may - be useful to add this command to - root's &man.crontab.5;, using the - option. - - - - - - - syslog - log files - named - - Configure &man.syslogd.8; to create an extra - log socket that - named can write to. To do this, - add -l /etc/namedb/dev/log to the - syslogd_flags variable in - /etc/rc.conf. - - - - - chroot - - Arrange to have named start - and chroot itself to the sandbox by - adding the following to - /etc/rc.conf: - - named_enable="YES" -named_flags="-u bind -g bind -t /etc/namedb /etc/named.conf" - - - Note that the configuration file - /etc/named.conf is denoted by a full - pathname relative to the sandbox, i.e. in - the line above, the file referred to is actually - /etc/namedb/etc/named.conf. - - - - - The next step is to edit - /etc/namedb/etc/named.conf so that - named knows which zones to load and - where to find them on the disk. There follows a commented - example (anything not specifically commented here is no - different from the setup for a DNS server not running in a - sandbox): - - options { - directory "/"; - named-xfer "/bin/named-xfer"; - version ""; // Don't reveal BIND version - query-source address * port 53; -}; -// ndc control socket -controls { - unix "/var/run/ndc" perm 0600 owner 0 group 0; -}; -// Zones follow: -zone "localhost" IN { - type master; - file "master/named.localhost"; - allow-transfer { localhost; }; - notify no; -}; -zone "0.0.127.in-addr.arpa" IN { - type master; - file "master/localhost.rev"; - allow-transfer { localhost; }; - notify no; -}; -zone "." 
IN { - type hint; - file "master/named.root"; -}; -zone "private.example.net" in { - type master; - file "master/private.example.net.db"; - allow-transfer { 192.168.10.0/24; }; -}; -zone "10.168.192.in-addr.arpa" in { - type slave; - masters { 192.168.10.2; }; - file "slave/192.168.10.db"; -}; - - - - The - directory statement is specified as - /, since all files that - named needs are within this - directory (recall that this is equivalent to a - normal user's - /etc/namedb). - - - - Specifies the full path - to the named-xfer binary (from - named's frame of reference). This - is necessary since named is - compiled to look for named-xfer in - /usr/libexec by default. - - Specifies the filename (relative - to the directory statement above) where - named can find the zone file for this - zone. - - Specifies the filename - (relative to the directory statement above) - where named should write a copy of - the zone file for this zone after successfully transferring it - from the master server. This is why we needed to change the - ownership of the directory slave to - bind in the setup stages above. - - - - After completing the steps above, either reboot your - server or restart &man.syslogd.8; and start &man.named.8;, making - sure to use the new options specified in - syslogd_flags and - named_flags. You should now be running a - sandboxed copy of named! - - - - - Security - - Although BIND is the most common implementation of DNS, - there is always the issue of security. Possible and - exploitable security holes are sometimes found. - - - - It is a good idea to read CERT's security advisories and - to subscribe to the &a.security-notifications; - to stay up to date with the current Internet and FreeBSD security - issues. - - - If a problem arises, keeping sources up to date and - having a fresh build of named would - not hurt. - - - - Further Reading - - BIND/named manual pages: - &man.ndc.8; &man.named.8; &man.named.conf.5; - - - - Official ISC BIND - Page - - - - - BIND FAQ - - - - O'Reilly - DNS and BIND 4th Edition - - - - RFC1034 - - Domain Names - Concepts and Facilities - - - - RFC1035 - - Domain Names - Implementation and Specification - - - -
- - - <acronym>BIND</acronym>9 and &os; - - TomRhodesWritten by - - - - - - - bind9 - setting up - - The release of &os; 5.3 brought the - BIND9 DNS server software - into the distribution. New security features, a new file system - layout and automated &man.chroot.8; configuration came with the - import. This section has been written in two parts, the first - will discuss new features and their configuration; the latter - will cover upgrades to aid in move to &os; 5.3. From this - moment on, the server will be referred to simply as - &man.named.8; in place of BIND. This section - skips over the terminology described in the previous section as - well as some of the theoretical discussions; thus, it is - recommended that the previous section be consulted before reading - any further here. - - Configuration files for named currently - reside in - /var/named/etc/namedb/ and - will need modification before use. This is where most of the - configuration will be performed. - - - Configuration of a Master Zone - - To configure a master zone visit - /var/named/etc/namedb/ - and run the following command: - - &prompt.root; sh make-localhost - - If all went well a new file should exist in the - master directory. The - filenames should be localhost.rev for - the local domain name and localhost-v6.rev - for IPv6 configurations. As the default - configuration file, configuration for its use will already - be present in the named.conf file. - + BIND + caching name server + - - Configuration of a Slave Zone + A caching name server is a name server whose primary role + is to resolve recursive queries. It simply asks queries of + its own, and remembers the answers for later use. + + + + <acronym role="Domain Name Security + Extensions">DNSSEC</acronym> - Configuration for extra domains or sub domains may be - done properly by setting them as a slave zone. In most cases, - the master/localhost.rev file could just be - copied over into the slave - directory and modified. Once completed, the files need - to be properly added in named.conf such - as in the following configuration for - example.com: - - zone "example.com" { - type slave; - file "slave/example.com"; - masters { - 10.0.0.1; - }; -}; + + BIND + DNS security + extensions + -zone "0.168.192.in-addr.arpa" { - type slave; - file "slave/0.168.192.in-addr.arpa"; - masters { - 10.0.0.1; - }; + Domain Name System Security Extensions, or DNSSEC for + short, is a suite of specifications to protect resolving name + servers from forged DNS data, such as + spoofed DNS records. By using digital + signatures, a resolver can verify the integrity of the record. + Note that DNSSEC only provides integrity via + digitally signing the Resource Records (RRs). It provides neither + confidentiality nor protection against false end-user + assumptions. This means that it cannot protect against people + going to example.net instead of + example.com. + The only thing DNSSEC does is authenticate + that the data has not been compromised in transit. The + security of DNS is an important step in + securing the Internet in general. For more in-depth details + of how DNSSEC works, the relevant + RFCs are a good place to start. See the + list in . + + The following sections will demonstrate how to enable + DNSSEC for an authoritative + DNS server and a recursive (or caching) + DNS server running BIND + 9. While all versions of BIND 9 support + DNSSEC, it is necessary to have at least + version 9.6.2 in order to be able to use the signed root zone + when validating DNS queries. 
This is + because earlier versions lack the required algorithms to + enable validation using the root zone key. It is strongly + recommended to use the latest version of + BIND 9.7 or later to take advantage of + automatic key updating for the root key, as well as other + features to automatically keep zones signed and signatures up + to date. Where configurations differ between 9.6.2 and 9.7 + and later, differences will be pointed out. + + + Recursive <acronym>DNS</acronym> Server + Configuration + + Enabling DNSSEC validation of queries + performed by a recursive DNS server + requires a few changes to named.conf. + Before making these changes the root zone key, or trust + anchor, must be acquired. Currently the root zone key is + not available in a file format BIND + understands, so it has to be manually converted into the + proper format. The key itself can be obtained by querying + the root zone for it using dig. + By running + + &prompt.user; dig +multi +noall +answer DNSKEY . > root.dnskey + + the key will end up in root.dnskey. + The contents should look something like this: + + . 93910 IN DNSKEY 257 3 8 ( + AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQ + bSEW0O8gcCjFFVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh + /RStIoO8g0NfnfL2MTJRkxoXbfDaUeVPQuYEhg37NZWA + JQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaDX6RS6CXp + oY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3 + LQpzW5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGO + Yl7OyQdXfZ57relSQageu+ipAdTTJ25AsRTAoub8ONGc + LmqrAmRLKBP1dfwhYB4N7knNnulqQxA+Uk1ihz0= + ) ; key id = 19036 +. 93910 IN DNSKEY 256 3 8 ( + AwEAAcaGQEA+OJmOzfzVfoYN249JId7gx+OZMbxy69Hf + UyuGBbRN0+HuTOpBxxBCkNOL+EJB9qJxt+0FEY6ZUVjE + g58sRr4ZQ6Iu6b1xTBKgc193zUARk4mmQ/PPGxn7Cn5V + EGJ/1h6dNaiXuRHwR+7oWh7DnzkIJChcTqlFrXDW3tjt +) ; key id = 34525 + + Do not be alarmed if the obtained keys differ from this + example. They might have changed since these instructions + were last updated. This output actually contains two keys. + The first key in the listing, with the value 257 after the + DNSKEY record type, is the one needed. This value indicates + that this is a Secure Entry Point + (SEP), commonly + known as a Key Signing Key + (KSK). The second + key, with value 256, is a subordinate key, commonly called a + Zone Signing Key + (ZSK). More on + the different key types later in + . + + Now the key must be verified and formatted so that + BIND can use it. To verify the key, + generate a DS + RR set. Create a + file containing these + RRs with + + &prompt.user; dnssec-dsfromkey -f root.dnskey . > root.ds + + These records use SHA-1 and SHA-256 respectively, and + should look similar to the following example, where the + longer is using SHA-256. + + . IN DS 19036 8 1 + B256BD09DC8DD59F0E0F0D8541B8328DD986DF6E +. IN DS 19036 8 2 49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5 + + The SHA-256 RR can now be compared to + the digest in https://data.iana.org/root-anchors/root-anchors.xml. + To be absolutely sure that the key has not been tampered + with the data in the XML file can be + verified using the PGP signature in + https://data.iana.org/root-anchors/root-anchors.asc. + + Next, the key must be formatted properly. This differs + a little between BIND versions 9.6.2 and + 9.7 and later. In version 9.7 support was added to + automatically track changes to the key and update it as + necessary. This is done using + managed-keys as seen in the example + below. When using the older version, the key is added using + a trusted-keys statement and updates must + be done manually. 
For BIND 9.6.2 the + format should look like: + + trusted-keys { + "." 257 3 8 + "AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF + FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX + bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD + X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz + W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS + Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq + QxA+Uk1ihz0="; }; - Note well that in this example, the master - IP address is the primary domain server - from which the zones are transferred; it does not necessary serve - as DNS server itself. - + For 9.7 the format will instead be: - - System Initialization Configuration + managed-keys { + "." initial-key 257 3 8 + "AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF + FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX + bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD + X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz + W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS + Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq + QxA+Uk1ihz0="; +}; - In order for the named daemon to start - when the system is booted, the following option must be present - in the rc.conf file: + The root key can now be added to + named.conf either directly or by + including a file containing the key. After these steps, + configure BIND to do + DNSSEC validation on queries by editing + named.conf and adding the following to + the options directive: + + dnssec-enable yes; +dnssec-validation yes; + + To verify that it is actually working use + dig to make a query for a signed + zone using the resolver just configured. A successful reply + will contain the AD flag to indicate the + data was authenticated. Running a query such as + + &prompt.user; dig @resolver +dnssec se ds + + should return the DS + RR for the .se zone. + In the flags: section the + AD flag should be set, as seen + in: + + ... +;; flags: qr rd ra ad; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 1 +... + + The resolver is now capable of authenticating + DNS queries. + + + + Authoritative <acronym>DNS</acronym> Server + Configuration + + In order to get an authoritative name server to serve a + DNSSEC signed zone a little more work is + required. A zone is signed using cryptographic keys which + must be generated. It is possible to use only one key for + this. The preferred method however is to have a strong + well-protected Key Signing Key + (KSK) that is + not rotated very often and a Zone Signing Key + (ZSK) that is + rotated more frequently. Information on recommended + operational practices can be found in RFC + 4641: DNSSEC Operational + Practices. Practices regarding the root zone can + be found in DNSSEC + Practice Statement for the Root Zone + KSK operator and DNSSEC + Practice Statement for the Root Zone + ZSK operator. The + KSK is used to + build a chain of authority to the data in need of validation + and as such is also called a Secure Entry Point + (SEP) key. A + message digest of this key, called a Delegation Signer + (DS) record, + must be published in the parent zone to establish the trust + chain. How this is accomplished depends on the parent zone + owner. The ZSK + is used to sign the zone, and only needs to be published + there. + + To enable DNSSEC for the example.com zone + depicted in previous examples, the first step is to use + dnssec-keygen to generate the + KSK and ZSK key pair. + This key pair can utilize different cryptographic + algorithms. 
It is recommended to use RSA/SHA256 for the + keys and 2048 bits key length should be enough. To generate + the KSK for example.com, run + + &prompt.user; dnssec-keygen -f KSK -a RSASHA256 -b 2048 -n ZONE example.com + + and to generate the ZSK, run + + &prompt.user; dnssec-keygen -a RSASHA256 -b 2048 -n ZONE example.com + + dnssec-keygen outputs two + files, the public and the private keys in files named + similar to Kexample.com.+005+nnnnn.key + (public) and + Kexample.com.+005+nnnnn.private + (private). The nnnnn part of the file + name is a five digit key ID. Keep track of which key ID + belongs to which key. This is especially important when + having more than one key in a zone. It is also possible to + rename the keys. For each KSK file + do: + + &prompt.user; mv Kexample.com.+005+nnnnn.key Kexample.com.+005+nnnnn.KSK.key +&prompt.user; mv Kexample.com.+005+nnnnn.private Kexample.com.+005+nnnnn.KSK.private + + For the ZSK files, substitute + KSK for ZSK as + necessary. The files can now be included in the zone file, + using the $include statement. It should + look something like this: + + $include Kexample.com.+005+nnnnn.KSK.key ; KSK +$include Kexample.com.+005+nnnnn.ZSK.key ; ZSK + + Finally, sign the zone and tell BIND + to use the signed zone file. To sign a zone + dnssec-signzone is used. The + command to sign the zone example.com, located in + example.com.db would look similar + to + + &prompt.user; dnssec-signzone -o + example.com -k Kexample.com.+005+nnnnn.KSK example.com.db + Kexample.com.+005+nnnnn.ZSK.key + + The key supplied to the argument is + the KSK and the other key file is the + ZSK that should be used in the signing. + It is possible to supply more than one + KSK and ZSK, which + will result in the zone being signed with all supplied keys. + This can be needed to supply zone data signed using more + than one algorithm. The output of + dnssec-signzone is a zone file + with all RRs signed. This output will + end up in a file with the extension + .signed, such as + example.com.db.signed. The + DS records will + also be written to a separate file + dsset-example.com. To use this signed + zone just modify the zone directive in + named.conf to use + example.com.db.signed. By default, the + signatures are only valid 30 days, meaning that the zone + needs to be resigned in about 15 days to be sure that + resolvers are not caching records with stale signatures. It + is possible to make a script and a cron job to do this. See + relevant manuals for details. + + Be sure to keep private keys confidential, as with all + cryptographic keys. When changing a key it is best to + include the new key into the zone, while still signing with + the old one, and then move over to using the new key to + sign. After these steps are done the old key can be removed + from the zone. Failure to do this might render the + DNS data unavailable for a time, until + the new key has propagated through the + DNS hierarchy. For more information on + key rollovers and other DNSSEC + operational issues, see RFC + 4641: DNSSEC Operational + practices. + + + + Automation Using <acronym>BIND</acronym> 9.7 or + Later + + Beginning with BIND version 9.7 a new + feature called Smart Signing was + introduced. This feature aims to make the key management + and signing process simpler by automating parts of the task. + By putting the keys into a directory called a + key repository, and using the new + option auto-dnssec, it is possible to + create a dynamic zone which will be resigned as needed. 
To + update this zone use nsupdate + with the new option . + rndc has also grown the ability + to sign zones with keys in the key repository, using the + option . To tell + BIND to use this automatic signing and + zone updating for example.com, add the + following to named.conf: - named_enable="YES" + zone example.com { + type master; + key-directory "/etc/named/keys"; + update-policy local; + auto-dnssec maintain; + file "/etc/named/dynamic/example.com.zone"; +}; - While other options exist, this is the bare minimal - requirement. Consult the &man.rc.conf.5; manual page for - a list of the other options. If nothing is entered in the - rc.conf file then named - may be started on the command line by invoking: + After making these changes, generate keys for the zone + as explained in , put those + keys in the key repository given as the argument to the + key-directory in the zone configuration + and the zone will be signed automatically. Updates to a + zone configured this way must be done using + nsupdate, which will take care of + re-signing the zone with the new data added. For further + details, see and the + BIND documentation. + + - &prompt.root; /etc/rc.d/named start - + + Security - - <acronym>BIND</acronym>9 Security + Although BIND is the most common implementation of + DNS, there is always the issue of security. + Possible and exploitable security holes are sometimes + found. + + While &os; automatically drops + named into a &man.chroot.8; + environment; there are several other security mechanisms in + place which could help to lure off possible + DNS service attacks. + + It is always good idea to read + CERT's security + advisories and to subscribe to the &a.security-notifications; + to stay up to date with the current Internet and &os; security + issues. + + + If a problem arises, keeping sources up to date and + having a fresh build of named + may help. + + - While &os; automatically drops named - into a &man.chroot.8; environment; there are several other - security mechanisms in place which could help to lure off - possible DNS service attacks. + + Further Reading - - Query Access Control Lists + BIND/named manual pages: + &man.rndc.8; &man.named.8; &man.named.conf.5; &man.nsupdate.1; + &man.dnssec-signzone.8; &man.dnssec-keygen.8; - A query access control list can be used to restrict - queries against the zones. The configuration works by - defining the network inside of the acl - token and then listing IP addresses in - the zone configuration. To permit domains to query the - example host, just define it like this: + + + Official + ISC BIND Page + - acl "example.com" { - 192.168.0.0/24; -}; + + Official + ISC BIND Forum + -zone "example.com" { - type slave; - file "slave/example.com"; - masters { - 10.0.0.1; - }; - allow-query { example.com; }; -}; + + O'Reilly + DNS and BIND 5th + Edition + -zone "0.168.192.in-addr.arpa" { - type slave; - file "slave/0.168.192.in-addr.arpa"; - masters { - 10.0.0.1; - }; - allow-query { example.com; }; -}; - + + Root + DNSSEC + - - Restrict Version + + DNSSEC + Trust Anchor Publication for the Root + Zone + - Permitting version lookups on the DNS - server could be opening the doors for an attacker. A - malicious user may use this information to hunt up known - exploits or bugs to utilize against the host. + + RFC1034 + - Domain Names - Concepts and Facilities + - - Setting a false version will not protect the server - from exploits. Only upgrading to a version that is not - vulnerable will protect your server. 
- + + RFC1035 + - Domain Names - Implementation and + Specification + - A false version string can be placed the - options section of - named.conf: - - options { - directory "/etc/namedb"; - pid-file "/var/run/named/pid"; - dump-file "/var/dump/named_dump.db"; - statistics-file "/var/stats/named.stats"; - version "None of your business"; -}; - + + RFC4033 + - DNS Security Introduction and + Requirements + - + + RFC 5011 + - Automated Updates of DNS Security + (DNSSEC + Trust Anchors + + + - - Apache HTTP Server + + Apache HTTP Server + - MurrayStokelyContributed by + + + Murray + Stokely + + Contributed by + - web servers setting up Apache - - Overview - - &os; is used to run some of the busiest web sites in the - world. The majority of web servers on the Internet are using - the Apache HTTP Server. - Apache software packages should be - included on your FreeBSD installation media. If you did not - install Apache when you first - installed FreeBSD, then you can install it from the www/apache13 or www/apache20 port. - - Once Apache has been installed - successfully, it must be configured. - - This section covers version 1.3.X of the - Apache HTTP Server as that is the - most widely used version for &os;. Apache 2.X introduces many - new technologies but they are not discussed here. For more - information about Apache 2.X, please see http://httpd.apache.org/. - - + The open source + Apache HTTP Server is the most widely + used web server. &os; does not install this web server by + default, but it can be installed from the + www/apache24 package or port. + + This section summarizes how to configure and start version + 2.x of the Apache HTTP + Server on &os;. For more detailed information + about Apache 2.X and its + configuration directives, refer to httpd.apache.org. - Configuration + Configuring and Starting Apache Apache configuration file - The main Apache HTTP Server configuration file is - installed as - /usr/local/etc/apache/httpd.conf on &os;. - This file is a typical &unix; text configuration file with - comment lines beginning with the # - character. A comprehensive description of all possible - configuration options is outside the scope of this book, so - only the most frequently modified directives will be described - here. + In &os;, the main Apache HTTP + Server configuration file is installed as + /usr/local/etc/apache2x/httpd.conf, + where x represents the version + number. This ASCII text file begins + comment lines with a #. The most + frequently modified directives are: ServerRoot "/usr/local" - This specifies the default directory hierarchy for - the Apache installation. Binaries are stored in the - bin and - sbin subdirectories - of the server root, and configuration files are stored in - etc/apache. + Specifies the default directory hierarchy for the + Apache installation. + Binaries are stored in the bin and + sbin subdirectories of the server + root and configuration files are stored in the etc/apache2x + subdirectory. - ServerAdmin you@your.address + ServerAdmin you@example.com - The address to which problems with the server should - be emailed. This address appears on some + Change this to the email address to receive problems + with the server. This address also appears on some server-generated pages, such as error documents. 
- ServerName www.example.com + ServerName + www.example.com:80 - ServerName allows you to set a host name which is - sent back to clients for your server if it is different - to the one that the host is configured with (i.e., use www - instead of the host's real name). + Allows an administrator to set a hostname which is + sent back to clients for the server. For example, + www can be used instead of the + actual hostname. If the system does not have a + registered DNS name, enter its + IP address instead. If the server + will listen on an alternate report, change + 80 to the alternate port + number. - DocumentRoot "/usr/local/www/data" + DocumentRoot + "/usr/local/www/apache2x/data" - DocumentRoot: The directory out of which you will - serve your documents. By default, all requests are taken - from this directory, but symbolic links and aliases may - be used to point to other locations. + The directory where documents will be served from. + By default, all requests are taken from this directory, + but symbolic links and aliases may be used to point to + other locations. - It is always a good idea to make backup copies of your - Apache configuration file before making changes. Once you are - satisfied with your initial configuration you are ready to - start running Apache. - - - - - - - - - - Running <application>Apache</application> + It is always a good idea to make a backup copy of the + default Apache configuration file + before making changes. When the configuration of + Apache is complete, save the file + and verify the configuration using + apachectl. Running apachectl + configtest should return Syntax + OK. Apache starting or stopping - Apache does not run from the - inetd super server as many other - network servers do. It is configured to run standalone for - better performance for incoming HTTP requests from client web - browsers. A shell script wrapper is included to make - starting, stopping, and restarting the server as simple as - possible. To start up Apache for - the first time, just run: - - &prompt.root; /usr/local/sbin/apachectl start - - You can stop the server at any time by typing: - - &prompt.root; /usr/local/sbin/apachectl stop - - After making changes to the configuration file for any - reason, you will need to restart the server: - - &prompt.root; /usr/local/sbin/apachectl restart - - To restart Apache without - aborting current connections, run: - - &prompt.root; /usr/local/sbin/apachectl graceful - - Additional information available at - &man.apachectl.8; manual page. - To launch Apache at system - startup, add the following line to - /etc/rc.conf: + startup, add the following line to + /etc/rc.conf: + + apache24_enable="YES" - apache_enable="YES" + If Apache should be started + with non-default options, the following line may be added to + /etc/rc.conf to specify the needed + flags: + + apache24_flags="" + + If apachectl does not report + configuration errors, start httpd + now: + + &prompt.root; service apache24 start + + The httpd service can be tested by + entering + http://localhost + in a web browser, replacing + localhost with the fully-qualified + domain name of the machine running httpd. + The default web page that is displayed is + /usr/local/www/apache24/data/index.html. 
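      As an optional, unofficial check (an assumption added here, not
      part of the documented procedure), the same test can be run from
      the command line on the server itself.  This sketch assumes the
      default listen port of 80 and uses only tools
      from the &os; base system, &man.fetch.1; and
      &man.sockstat.1;:

&prompt.user; fetch -o - http://localhost/
&prompt.root; sockstat -4 -l | grep httpd

      The first command prints the default
      index.html page to standard output, and the
      second confirms that httpd has a listening
      socket on the expected port.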
+ + The Apache configuration can be + tested for errors after making subsequent configuration + changes while httpd is running using the + following command: - If you would like to supply additional command line - options for the Apache - httpd program started at system boot, you - may specify them with an additional line in - rc.conf: - - apache_flags="" - - Now that the web server is running, you can view your web - site by pointing a web browser to - http://localhost/. The default web page - that is displayed is - /usr/local/www/data/index.html. + &prompt.root; service apache24 configtest + + It is important to note that + configtest is not an &man.rc.8; standard, + and should not be expected to work for all startup + scripts. + Virtual Hosting - Apache supports two different - types of Virtual Hosting. The first method is Name-based - Virtual Hosting. Name-based virtual hosting uses the clients - HTTP/1.1 headers to figure out the hostname. This allows many - different domains to share the same IP address. + Virtual hosting allows multiple websites to run on one + Apache server. The virtual hosts + can be IP-based or + name-based. + IP-based virtual hosting uses a different + IP address for each website. Name-based + virtual hosting uses the clients HTTP/1.1 headers to figure + out the hostname, which allows the websites to share the same + IP address. To setup Apache to use - Name-based Virtual Hosting add an entry like the following to - your httpd.conf: - - NameVirtualHost * - - If your webserver was named www.domain.tld and - you wanted to setup a virtual domain for - www.someotherdomain.tld then you would add - the following entries to - httpd.conf: + name-based virtual hosting, add a + VirtualHost block for each website. For + example, for the webserver named www.domain.tld with a + virtual domain of www.someotherdomain.tld, + add the following entries to + httpd.conf: <VirtualHost *> -ServerName www.domain.tld -DocumentRoot /www/domain.tld +ServerName www.domain.tld +DocumentRoot /www/domain.tld </VirtualHost> <VirtualHost *> -ServerName www.someotherdomain.tld -DocumentRoot /www/someotherdomain.tld +ServerName www.someotherdomain.tld +DocumentRoot /www/someotherdomain.tld </VirtualHost> - Replace the addresses with the addresses you want to use - and the path to the documents with what you are using. + For each virtual host, replace the values for + ServerName and + DocumentRoot with the values to be + used. For more information about setting up virtual hosts, - please consult the official Apache - documentation at: http://httpd.apache.org/docs/vhosts/. - + consult the official Apache + documentation at: http://httpd.apache.org/docs/vhosts/. @@ -4332,280 +4638,381 @@ Apache modules - There are many different Apache modules available to add - functionality to the basic server. The FreeBSD Ports - Collection provides an easy way to install - Apache together with some of the - more popular add-on modules. + Apache uses modules to augment + the functionality provided by the basic server. Refer to http://httpd.apache.org/docs/current/mod/ + for a complete listing of and the configuration details for + the available modules. + + In &os;, some modules can be compiled with the + www/apache24 port. Type make + config within + /usr/ports/www/apache24 to see which + modules are available and which are enabled by default. If + the module is not compiled with the port, the &os; Ports + Collection provides an easy way to install many modules. 
This + section describes three of the most commonly used + modules. - mod_ssl + <filename>mod_ssl</filename> - web servers - secure + + web servers + secure + SSL cryptography - The mod_ssl module uses the OpenSSL library to provide - strong cryptography via the Secure Sockets Layer (SSL v2/v3) - and Transport Layer Security (TLS v1) protocols. This - module provides everything necessary to request a signed - certificate from a trusted certificate signing authority so - that you can run a secure web server on &os;. - - If you have not yet installed - Apache, then a version of Apache - 1.3.X that includes mod_ssl may be installed with the www/apache13-modssl port. SSL - support is also available for Apache 2.X in the - www/apache20 port, - where it is enabled by default. - - - - + The mod_ssl module uses the + OpenSSL library to provide strong + cryptography via the Secure Sockets Layer + (SSLv3) and Transport Layer Security + (TLSv1) protocols. This module provides + everything necessary to request a signed certificate from a + trusted certificate signing authority to run a secure web + server on &os;. + + In &os;, mod_ssl module is enabled + by default in both the package and the port. The available + configuration directives are explained at http://httpd.apache.org/docs/current/mod/mod_ssl.html. - Dynamic Websites with Perl & PHP - In the past few years, more businesses have turned to the - Internet in order to enhance their revenue and increase - exposure. This has also increased the need for interactive - web content. While some companies, such as µsoft;, have - introduced solutions into their proprietary products, the - open source community answered the call. Two options for - dynamic web content include mod_perl & mod_php. - - - mod_perl + <filename>mod_perl</filename> - mod_perl - Perl - - - The Apache/Perl integration project brings together the - full power of the Perl programming language and the Apache - HTTP Server. With the mod_perl module it is possible to - write Apache modules entirely in Perl. In addition, the + mod_perl + Perl + + + The + mod_perl module makes it possible to + write Apache modules in + Perl. In addition, the persistent interpreter embedded in the server avoids the overhead of starting an external interpreter and the penalty - of Perl start-up time. + of Perl start-up time. - mod_perl is available a few - different ways. To use mod_perl - remember that mod_perl 1.0 only - works with Apache 1.3 and - mod_perl 2.0 only works with - Apache 2. - mod_perl 1.0 is available in - www/mod_perl and a - statically compiled version is available in - www/apache13-modperl. - mod_perl 2.0 is avaliable in - www/mod_perl2. - + The mod_perl can be installed using + the www/mod_perl2 package or port. + Documentation for using this module can be found at http://perl.apache.org/docs/2.0/index.html. + + + + + <filename>mod_php</filename> - - mod_php - TomRhodesWritten by + + + Tom + Rhodes + + Written by + - - + - mod_php - PHP - - - PHP, also known as PHP: - Hypertext Preprocessor is a general-purpose scripting - language that is especially suited for Web development. - Capable of being embedded into HTML its - syntax draws upon C, &java;, and Perl with the intention of - allowing web developers to write dynamically generated - webpages quickly. + mod_php + PHP + + + PHP: Hypertext Preprocessor + (PHP) is a general-purpose scripting + language that is especially suited for web development. 
+ Capable of being embedded into HTML, its + syntax draws upon C, &java;, and + Perl with the intention of + allowing web developers to write dynamically generated + webpages quickly. To gain support for PHP5 for the - Apache web server, begin by - installing the - www/mod_php5 - port. - - This will install and configure the modules required - to support dynamic PHP applications. Check - to ensure the following lines have been added to - /usr/local/etc/apache/httpd.conf: + Apache web server, install the + www/mod_php5 package or port. This will + install and configure the modules required to support + dynamic PHP applications. The + installation will automatically add this line to + /usr/local/etc/apache24/httpd.conf: + + LoadModule php5_module libexec/apache24/libphp5.so - LoadModule php5_module libexec/apache/libphp5.so + + + Then, perform a graceful restart to load the + PHP module: &prompt.root; apachectl graceful - The PHP support in &os; is extremely - modular so the base install is very limited. It is very easy - to add support using the - lang/php5-extensions port. - This port provides a menu driven interface to - PHP extension installation. - Alternatively, individual extensions can be installed using - the appropriate port. - - For instance, to add support for the - MySQL database server to - PHP5, simply install the - databases/php5-mysql - port. - - After installing an extension, the - Apache server must be reloaded to - pick up the new configuration changes. + The PHP support provided by + www/mod_php5 is limited. Additional + support can be installed using the + lang/php5-extensions port which provides + a menu driven interface to the available + PHP extensions. + + Alternatively, individual extensions can be installed + using the appropriate port. For instance, to add + PHP support for the + MySQL database server, install + databases/php5-mysql. + + After installing an extension, the + Apache server must be reloaded to + pick up the new configuration changes: &prompt.root; apachectl graceful - - - - File Transfer Protocol (FTP) - - MurrayStokelyContributed by - - - + + Dynamic Websites + + + web servers + dynamic + - FTP servers + In addition to mod_perl and + mod_php, other languages are + available for creating dynamic web content. These include + Django and + Ruby on Rails. - - Overview + + Django - The File Transfer Protocol (FTP) provides users with a - simple way to transfer files to and from an FTP server. &os; - includes FTP - server software, ftpd, in the base - system. This makes setting up and administering an FTP server on FreeBSD - very straightforward. + Python + Django + + Django is a BSD-licensed + framework designed to allow developers to write high + performance, elegant web applications quickly. It provides + an object-relational mapper so that data types are developed + as Python objects. A rich + dynamic database-access API is provided + for those objects without the developer ever having to write + SQL. It also provides an extensible + template system so that the logic of the application is + separated from the HTML + presentation. + + Django depends on mod_python, and + an SQL database engine. In &os;, the + www/py-django port automatically installs + mod_python and supports the + PostgreSQL, + MySQL, or + SQLite databases, with the + default being SQLite. To change + the database engine, type make config + within /usr/ports/www/py-django, then + install the port. 
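      As a minimal illustrative sketch (not performed by the port
      installation itself), the project directory discussed next can be
      created with the django-admin.py script that
      www/py-django installs.  The project name
      mysite is only an example here, chosen to match
      the mysite.settings module referenced in the
      Apache configuration shown
      below:

&prompt.user; django-admin.py startproject mysite

      This creates a mysite project directory
      containing manage.py and the settings module
      that is importable as mysite.settings, which is
      what the embedded Python interpreter
      will load.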
+ + Once Django is installed, the + application will need a project directory along with the + Apache configuration in order to + use the embedded Python + interpreter. This interpreter is used to call the + application for specific URLs on the + site. + + To configure Apache to pass + requests for certain URLs to the web + application, add the following to + httpd.conf, specifying the full path to + the project directory: + + <Location "/"> + SetHandler python-program + PythonPath "['/dir/to/the/django/packages/'] + sys.path" + PythonHandler django.core.handlers.modpython + SetEnv DJANGO_SETTINGS_MODULE mysite.settings + PythonAutoReload On + PythonDebug On +</Location> + + Refer to https://docs.djangoproject.com/en/1.6/ + for more information on how to use + Django. + + + + Ruby on Rails + + Ruby on Rails + + Ruby on Rails is another open + source web framework that provides a full development stack. + It is optimized to make web developers more productive and + capable of writing powerful applications quickly. On &os;, + it can be installed using the + www/rubygem-rails package or port. + + Refer to http://rubyonrails.org/documentation + for more information on how to use Ruby on + Rails. + + + + + + File Transfer Protocol (<acronym>FTP</acronym>) + + FTP + servers + + The File Transfer Protocol (FTP) provides + users with a simple way to transfer files to and from an + FTP server. &os; includes + FTP server software, + ftpd, in the base system. + + &os; provides several configuration files for controlling + access to the FTP server. This section + summarizes these files. Refer to &man.ftpd.8; for more details + about the built-in FTP server. Configuration The most important configuration step is deciding which - accounts will be allowed access to the FTP server. A normal - FreeBSD system has a number of system accounts used for - various daemons, but unknown users should not be allowed to - log in with these accounts. The - /etc/ftpusers file is a list of users - disallowed any FTP access. By default, it includes the - aforementioned system accounts, but it is possible to add - specific users here that should not be allowed access to - FTP. - - You may want to restrict the access of some users without - preventing them completely from using FTP. This can be - accomplished with the /etc/ftpchroot - file. This file lists users and groups subject to FTP access - restrictions. The &man.ftpchroot.5; manual page has all of - the details so it will not be described in detail here. + accounts will be allowed access to the FTP + server. A &os; system has a number of system accounts which + should not be allowed FTP access. The list + of users disallowed any FTP access can be + found in /etc/ftpusers. By default, it + includes system accounts. Additional users that should not be + allowed access to FTP can be added. + + In some cases it may be desirable to restrict the access + of some users without preventing them completely from using + FTP. This can be accomplished be creating + /etc/ftpchroot as described in + &man.ftpchroot.5;. This file lists users and groups subject + to FTP access restrictions. - FTP + FTP anonymous - If you would like to enable anonymous FTP access to your - server, then you must create a user named - ftp on your &os; system. Users will then - be able to log on to your FTP server with a username of - ftp or anonymous and - with any password (by convention an email address for the user - should be used as the password). 
The FTP server will call - &man.chroot.2; when an anonymous user logs in, to restrict - access to only the home directory of the - ftp user. - - There are two text files that specify welcome messages to - be displayed to FTP clients. The contents of the file + To enable anonymous FTP access to the + server, create a user named ftp on the &os; system. Users + will then be able to log on to the + FTP server with a username of + ftp or anonymous. When prompted for + the password, any input will be accepted, but by convention, + an email address should be used as the password. The + FTP server will call &man.chroot.2; when an + anonymous user logs in, to restrict access to only the home + directory of the ftp user. + + There are two text files that can be created to specify + welcome messages to be displayed to FTP + clients. The contents of /etc/ftpwelcome will be displayed to users before they reach the login prompt. After a successful - login, the contents of the file + login, the contents of /etc/ftpmotd will be displayed. Note - that the path to this file is relative to the login environment, so the - file ~ftp/etc/ftpmotd would be displayed - for anonymous users. - - Once the FTP server has been configured properly, it must - be enabled in /etc/inetd.conf. All that - is required here is to remove the comment symbol - # from in front of the existing - ftpd line : - - ftp stream tcp nowait root /usr/libexec/ftpd ftpd -l - - As explained in , a - HangUP Signal must be sent to inetd - after this configuration file is changed. + that the path to this file is relative to the login + environment, so the contents of + ~ftp/etc/ftpmotd would be displayed for + anonymous users. - You can now log on to your FTP server by typing: + Once the FTP server has been + configured, set the appropriate variable in + /etc/rc.conf to start the service during + boot: - &prompt.user; ftp localhost + ftpd_enable="YES" - + To start the service now: - - Maintaining + &prompt.root; service ftpd start + + Test the connection to the FTP server + by typing: + + &prompt.user; ftp localhost syslog log files - FTP + FTP The ftpd daemon uses - &man.syslog.3; to log messages. By default, the system log - daemon will put messages related to FTP in the - /var/log/xferlog file. The location of - the FTP log can be modified by changing the following line in - /etc/syslog.conf: + &man.syslog.3; to log messages. By default, the system log + daemon will write messages related to FTP + in /var/log/xferlog. The location of + the FTP log can be modified by changing the + following line in + /etc/syslog.conf: ftp.info /var/log/xferlog - FTP + FTP anonymous - Be aware of the potential problems involved with running - an anonymous FTP server. In particular, you should think - twice about allowing anonymous users to upload files. You may - find that your FTP site becomes a forum for the trade of - unlicensed commercial software or worse. If you do need to - allow anonymous FTP uploads, then you should set up the - permissions so that these files can not be read by other - anonymous users until they have been reviewed. - + + Be aware of the potential problems involved with running + an anonymous FTP server. In particular, + think twice about allowing anonymous users to upload files. + It may turn out that the FTP site becomes + a forum for the trade of unlicensed commercial software or + worse. 
If anonymous FTP uploads are + required, then verify the permissions so that these files + can not be read by other anonymous users until they have + been reviewed by an administrator. + - File and Print Services for µsoft.windows; clients (Samba) + + File and Print Services for µsoft.windows; Clients + (Samba) Samba server Microsoft Windows @@ -4618,70 +5025,87 @@ Windows clients - - Overview - - Samba is a popular open source - software package that provides file and print services for - µsoft.windows; clients. Such clients can connect to and - use FreeBSD filespace as if it was a local disk drive, or - FreeBSD printers as if they were local printers. - - Samba software packages should - be included on your FreeBSD installation media. If you did - not install Samba when you first - installed FreeBSD, then you can install it from the net/samba3 port or package. + Samba is a popular open source + software package that provides file and print services using the + SMB/CIFS protocol. This protocol is built + into µsoft.windows; systems. It can be added to + non-µsoft.windows; systems by installing the + Samba client libraries. The protocol + allows clients to access shared data and printers. These shares + can be mapped as a local disk drive and shared printers can be + used as if they were local printers. + + On &os;, the Samba client + libraries can be installed using the + net/samba-smbclient port or package. The + client provides the ability for a &os; system to access + SMB/CIFS shares in a µsoft.windows; + network. + + A &os; system can also be configured to act as a + Samba server. This allows the + administrator to create SMB/CIFS shares on + the &os; system which can be accessed by clients running + µsoft.windows; or the Samba + client libraries. In order to configure a + Samba server on &os;, the + net/samba36 port or package must first be + installed. The rest of this section provides an overview of how + to configure a Samba server on + &os;. - - Configuration A default Samba configuration - file is installed as - /usr/local/etc/smb.conf.default. This - file must be copied to - /usr/local/etc/smb.conf and customized - before Samba can be used. - - The smb.conf file contains runtime - configuration information for - Samba, such as definitions of the - printers and file system shares that you would - like to share with &windows; clients. The - Samba package includes a web based - tool called swat which provides a - simple way of configuring the smb.conf - file. + file is installed as + /usr/local/share/examples/samba36/smb.conf.default. + This file must be copied to + /usr/local/etc/smb.conf and customized + before Samba can be used. + + Runtime configuration information for + Samba is found in + smb.conf, such as definitions of the + printers and file system shares that will + be shared with &windows; clients. The + Samba package includes a web based + tool called swat which provides a + simple way for configuring + smb.conf. Using the Samba Web Administration Tool (SWAT) The Samba Web Administration Tool (SWAT) runs as a - daemon from inetd. Therefore, the - following line in /etc/inetd.conf - should be uncommented before swat can be - used to configure Samba: - - swat stream tcp nowait/400 root /usr/local/sbin/swat - As explained in , a - HangUP Signal must be sent to - inetd after this configuration - file is changed. - - Once swat has been enabled in - inetd.conf, you can use a browser to - connect to http://localhost:901. 
You will - first have to log on with the system root account. - - - - Once you have successfully logged on to the main - Samba configuration page, you can - browse the system documentation, or begin by clicking on the - Globals tab. The Globals section corresponds to the + daemon from inetd. Therefore, + inetd must be enabled as shown in + . To enable + swat, uncomment the following + line in /etc/inetd.conf: + + swat stream tcp nowait/400 root /usr/local/sbin/swat swat + + As explained in , + the inetd configuration must be + reloaded after this configuration file is changed. + + Once swat has been enabled, + use a web browser to connect to http://localhost:901. + At first login, enter the credentials for root. + + + + Once logged in, the main + Samba configuration page and the + system documentation will be available. Begin configuration + by clicking on the Globals tab. The + Globals section corresponds to the variables that are set in the [global] section of /usr/local/etc/smb.conf. @@ -4690,19 +5114,18 @@ Global Settings - Whether you are using swat or - editing /usr/local/etc/smb.conf - directly, the first directives you are likely to encounter - when configuring Samba - are: + Whether swat is used or + /usr/local/etc/smb.conf is edited + directly, the first directives encountered when configuring + Samba are: - + workgroup - NT Domain-Name or Workgroup-Name for the computers - that will be accessing this server. + The domain name or workgroup name for the + computers that will be accessing this server. @@ -4710,11 +5133,10 @@ netbios name - NetBIOS - - This sets the NetBIOS name by which a Samba server - is known. By default it is the same as the first - component of the host's DNS name. + The NetBIOS name by which a + Samba server is known. By + default it is the same as the first component of the + host's DNS name. @@ -4722,13 +5144,13 @@ server string - This sets the string that will be displayed with - the net view command and some other + The string that will be displayed in the output of + net view and some other networking tools that seek to display descriptive text about the server. - + @@ -4736,29 +5158,29 @@ Two of the most important settings in /usr/local/etc/smb.conf are the - security model chosen, and the backend password format for - client users. The following directives control these + security model and the backend password format for client + users. The following directives control these options: - + security - The two most common options here are - security = share and security - = user. If your clients use usernames that - are the same as their usernames on your &os; machine - then you will want to use user level security. This - is the default security policy and it requires clients - to first log on before they can access shared - resources. - - In share level security, client do not need to log - onto the server with a valid username and password - before attempting to connect to a shared resource. - This was the default security model for older versions - of Samba. + The two most common options are + security = share and + security = user. If the clients + use usernames that are the same as their usernames on + the &os; machine, user level security should be + used. This is the default security policy and it + requires clients to first log on before they can + access shared resources. + + In share level security, clients do not need to + log onto the server with a valid username and password + before attempting to connect to a shared resource. 
+ This was the default security model for older versions + of Samba. @@ -4766,279 +5188,247 @@ passdb backend + NIS+ + LDAP + SQL database + Samba has several - different backend authentication models. You can - authenticate clients with LDAPLDAP, - NIS+NIS+, a SQL databaseSQL database, - or a modified password file. The default - authentication method is smbpasswd, - and that is all that will be covered here. + different backend authentication models. Clients may + be authenticated with LDAP, NIS+, an SQL database, + or a modified password file. The default + authentication method is smbpasswd, + and that is all that will be covered here. Assuming that the default smbpasswd - backend is used, the - /usr/local/private/smbpasswd file must - be created to allow Samba to - authenticate clients. If you would like to give all of - your &unix; user accounts access from &windows; clients, use the - following command: - - &prompt.root; grep -v "^#" /etc/passwd | make_smbpasswd > /usr/local/private/smbpasswd -&prompt.root; chmod 600 /usr/local/private/smbpasswd - - Please see the Samba - documentation for additional information about configuration - options. With the basics outlined here, you should have - everything you need to start running - Samba. + backend is used, + /usr/local/etc/samba/smbpasswd + must be created to allow Samba to + authenticate clients. To provide &unix; user accounts + access from &windows; clients, use the following command to + add each required user to that file: + + &prompt.root; smbpasswd -a username + + + The recommended backend is now + tdbsam. If this backend is selected, + use the following command to add user accounts: + + &prompt.root; pdbedit -a -u username + + + This section has only mentioned the most commonly used + settings. Refer to the Official + Samba HOWTO for additional information about the + available configuration options. - + Starting <application>Samba</application> - To enable Samba when your - system boots, add the following line to - /etc/rc.conf: + To enable Samba at boot time, + add the following line to + /etc/rc.conf: samba_enable="YES" - You can then start Samba at any - time by typing: + Alternately, its services can be started + separately: + + nmbd_enable="YES" - &prompt.root; /usr/local/etc/rc.d/samba.sh start + smbd_enable="YES" + + To start Samba now: + + &prompt.root; service samba start Starting SAMBA: removing stale tdbs : Starting nmbd. Starting smbd. - Samba actually consists of - three separate daemons. You should see that both the - nmbd and smbd daemons - are started by the samba.sh script. If - you enabled winbind name resolution services in - smb.conf, then you will also see that - the winbindd daemon is started. + Samba consists of three + separate daemons. Both the nmbd + and smbd daemons are started by + samba_enable. If winbind name resolution + services are enabled in smb.conf, the + winbindd daemon is started as + well. - You can stop Samba at any time - by typing : + Samba may be stopped at any + time by typing: - &prompt.root; /usr/local/etc/rc.d/samba.sh stop + &prompt.root; service samba stop Samba is a complex software - suite with functionality that allows broad integration with - µsoft.windows; networks. For more information about - functionality beyond the basic installation described here, - please see http://www.samba.org. + suite with functionality that allows broad integration with + µsoft.windows; networks. 
For more information about + functionality beyond the basic configuration described here, + refer to http://www.samba.org. - - Clock Synchronization with NTP + + Clock Synchronization with NTP - - Choosing Appropriate NTP Servers - - - NTP - choosing servers - + NTP + ntpd + - In order to synchronize your clock, you will need to find - one or more NTP servers to use. Your network - administrator or ISP may have set up an NTP server for this - purpose—check their documentation to see if this is the - case. There is an online - list of publicly accessible NTP servers which you can - use to find an NTP server near to you. Make sure you are - aware of the policy for any servers you choose, and ask for - permission if required. - - Choosing several unconnected NTP servers is a good idea in - case one of the servers you are using becomes unreachable or - its clock is unreliable. &man.ntpd.8; uses the responses it - receives from other servers intelligently—it will favor - unreliable servers less than reliable ones. - + Over time, a computer's clock is prone to drift. This is + problematic as many network services require the computers on a + network to share the same accurate time. Accurate time is also + needed to ensure that file timestamps stay consistent. The + Network Time Protocol (NTP) is one way to + provide clock accuracy in a network. + + &os; includes &man.ntpd.8; which can be configured to query + other NTP servers in order to synchronize the + clock on that machine or to provide time services to other + computers in the network. The servers which are queried can be + local to the network or provided by an ISP. + In addition, an online + list of publicly accessible NTP + servers is available. When choosing a public + NTP server, select one that is geographically + close and review its usage policy. + + Choosing several NTP servers is + recommended in case one of the servers becomes unreachable or + its clock proves unreliable. As ntpd + receives responses, it favors reliable servers over the less + reliable ones. + + This section describes how to configure + ntpd on &os;. Further documentation + can be found in /usr/share/doc/ntp/ in HTML + format. - Configuring Your Machine + <acronym>NTP</acronym> Configuration - - NTP - configuration + NTP + ntp.conf - - Basic Configuration - ntpdate - - If you only wish to synchronize your clock when the - machine boots up, you can use &man.ntpdate.8;. This may be - appropriate for some desktop machines which are frequently - rebooted and only require infrequent synchronization, but - most machines should run &man.ntpd.8;. - - Using &man.ntpdate.8; at boot time is also a good idea - for machines that run &man.ntpd.8;. The &man.ntpd.8; - program changes the clock gradually, whereas &man.ntpdate.8; - sets the clock, no matter how great the difference between a - machine's current clock setting and the correct time. - - To enable &man.ntpdate.8; at boot time, add - ntpdate_enable="YES" to - /etc/rc.conf. You will also need to - specify all servers you wish to synchronize with and any - flags to be passed to &man.ntpdate.8; in - ntpdate_flags. - - - - General Configuration - - - NTP - ntp.conf - + On &os;, the built-in ntpd can + be used to synchronize a system's clock. To enable + ntpd at boot time, add + ntpd_enable="YES" to + /etc/rc.conf. Additional variables can + be specified in /etc/rc.conf. Refer to + &man.rc.conf.5; and &man.ntpd.8; for + details. + + This application reads /etc/ntp.conf + to determine which NTP servers to query. 
+ Here is a simple example of an + /etc/ntp.conf: - NTP is configured by the - /etc/ntp.conf file in the format - described in &man.ntp.conf.5;. Here is a simple - example: + + Sample <filename>/etc/ntp.conf</filename> server ntplocal.example.com prefer server timeserver.example.org server ntp2a.example.net driftfile /var/db/ntp.drift + - The server option specifies which - servers are to be used, with one server listed on each line. - If a server is specified with the prefer - argument, as with ntplocal.example.com, that server is - preferred over other servers. A response from a preferred - server will be discarded if it differs significantly from - other servers' responses, otherwise it will be used without - any consideration to other responses. The - prefer argument is normally used for NTP - servers that are known to be highly accurate, such as those - with special time monitoring hardware. - - The driftfile option specifies which - file is used to store the system clock's frequency offset. - The &man.ntpd.8; program uses this to automatically - compensate for the clock's natural drift, allowing it to - maintain a reasonably correct setting even if it is cut off - from all external time sources for a period of time. - - The driftfile option specifies which - file is used to store information about previous responses - from the NTP servers you are using. This file contains - internal information for NTP. It should not be modified by - any other process. - + The format of this file is described in &man.ntp.conf.5;. + The server option specifies which servers + to query, with one server listed on each line. If a server + entry includes prefer, that server is + preferred over other servers. A response from a preferred + server will be discarded if it differs significantly from + other servers' responses; otherwise it will be used. The + prefer argument should only be used for + NTP servers that are known to be highly + accurate, such as those with special time monitoring + hardware. + + The driftfile entry specifies which + file is used to store the system clock's frequency offset. + ntpd uses this to automatically + compensate for the clock's natural drift, allowing it to + maintain a reasonably correct setting even if it is cut off + from all external time sources for a period of time. This + file also stores information about previous responses + from NTP servers. Since this file contains + internal information for NTP, it should not + be modified. + + By default, an NTP server is accessible + to any network host. The restrict option + in /etc/ntp.conf can be used to control + which systems can access the server. For example, to deny all + machines from accessing the NTP server, add + the following line to + /etc/ntp.conf: - - Controlling Access to Your Server + restrict default ignore - By default, your NTP server will be accessible to all - hosts on the Internet. The restrict - option in /etc/ntp.conf allows you to - control which machines can access your server. 
- - If you want to deny all machines from accessing your NTP - server, add the following line to - /etc/ntp.conf: - - restrict default ignore - - If you only want to allow machines within your own - network to synchronize their clocks with your server, but - ensure they are not allowed to configure the server or used - as peers to synchronize against, add - - restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap - - instead, where 192.168.1.0 is - an IP address on your network and 255.255.255.0 is your network's - netmask. - - /etc/ntp.conf can contain multiple - restrict options. For more details, see - the Access Control Support subsection of - &man.ntp.conf.5;. - - + + This will also prevent access from other + NTP servers. If there is a need to + synchronize with an external NTP server, + allow only that specific server. Refer to &man.ntp.conf.5; + for more information. + - - Running the NTP Server + To allow machines within the network to synchronize their + clocks with the server, but ensure they are not allowed to + configure the server or be used as peers to synchronize + against, instead use: - To ensure the NTP server is started at boot time, add the - line ntpd_enable="YES" to - /etc/rc.conf. If you wish to pass - additional flags to &man.ntpd.8;, edit the - ntpd_flags parameter in - /etc/rc.conf. - - To start the server without rebooting your machine, run - ntpd being sure to specify any additional - parameters from ntpd_flags in - /etc/rc.conf. For example: + restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap - &prompt.root; ntpd -p /var/run/ntpd.pid + where 192.168.1.0 is the local + network address and 255.255.255.0 is the network's + subnet mask. - - Under &os; 4.X, - you have to replace every instance of ntpd - with xntpd in the options above. + Multiple restrict entries are + supported. For more details, refer to the Access + Control Support subsection of + &man.ntp.conf.5;. + + Once ntpd_enable="YES" has been added + to /etc/rc.conf, + ntpd can be started now without + rebooting the system by typing: + + &prompt.root; service ntpd start - Using ntpd with a Temporary Internet - Connection + Using <acronym>NTP</acronym> with a + <acronym>PPP</acronym> Connection - The &man.ntpd.8; program does not need a permanent + ntpd does not need a permanent connection to the Internet to function properly. However, if - you have a temporary connection that is configured to dial out - on demand, it is a good idea to prevent NTP traffic from - triggering a dial out or keeping the connection alive. If you - are using user PPP, you can use filter + a PPP connection is configured to dial out + on demand, NTP traffic should be prevented + from triggering a dial out or keeping the connection alive. + This can be configured with filter directives in /etc/ppp/ppp.conf. For example: @@ -5051,23 +5441,349 @@ # Prevent outgoing NTP traffic from keeping the connection open set filter alive 2 permit 0/0 0/0 - For more details see the PACKET - FILTERING section in &man.ppp.8; and the examples in + For more details, refer to the + PACKET FILTERING section in &man.ppp.8; and + the examples in /usr/share/examples/ppp/. Some Internet access providers block low-numbered ports, - preventing NTP from functioning since replies never - reach your machine. + preventing NTP from functioning since replies never reach + the machine. 
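	As an illustrative check, assuming ntpd is already running with
	the servers configured in /etc/ntp.conf, the peer status can be
	queried to confirm that replies are reaching the machine and
	that the clock has synchronized:

	&prompt.root; ntpq -p

	A server marked with an asterisk (*) in the first column of the
	output is the peer the system clock is currently synchronized
	to.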
+ - - Further Information + + + + <acronym>iSCSI</acronym> Initiator and Target + Configuration - Documentation for the NTP server can be found in - /usr/share/doc/ntp/ in HTML - format. + iSCSI is a way to share storage over a + network. Unlike NFS, which works at the file + system level, iSCSI works at the block device + level. + + In iSCSI terminology, the system that + shares the storage is known as the target. + The storage can be a physical disk, or an area representing + multiple disks or a portion of a physical disk. For example, if + the disk(s) are formatted with ZFS, a zvol + can be created to use as the iSCSI + storage. + + The clients which access the iSCSI + storage are called initiators. To + initiators, the storage available through + iSCSI appears as a raw, unformatted disk + known as a LUN. Device nodes for the disk + appear in /dev/ and the device must be + separately formatted and mounted. + + Beginning with 10.0-RELEASE, &os; provides a native, + kernel-based iSCSI target and initiator. + This section describes how to configure a &os; system as a + target or an initiator. + + + Configuring an <acronym>iSCSI</acronym> Target + + + The native iSCSI target is supported + starting with &os; 10.0-RELEASE. To use + iSCSI in older versions of &os;, install + a userspace target from the Ports Collection, such as + net/istgt. This chapter only describes + the native target. + + + To configure an iSCSI target, create + the /etc/ctl.conf configuration file, add + a line to /etc/rc.conf to make sure the + &man.ctld.8; daemon is automatically started at boot, and then + start the daemon. + + The following is an example of a simple + /etc/ctl.conf configuration file. Refer + to &man.ctl.conf.5; for a more complete description of this + file's available options. + + portal-group pg0 { + discovery-auth-group no-authentication + listen 0.0.0.0 + listen [::] +} + +target iqn.2012-06.com.example:target0 { + auth-group no-authentication + portal-group pg0 + + lun 0 { + path /data/target0-0 + size 4G + } +} + + The first entry defines the pg0 portal + group. Portal groups define which network addresses the + &man.ctld.8; daemon will listen on. The + discovery-auth-group no-authentication + entry indicates that any initiator is allowed to perform + iSCSI target discovery without + authentication. Lines three and four configure &man.ctld.8; + to listen on all IPv4 + (listen 0.0.0.0) and + IPv6 (listen [::]) + addresses on the default port of 3260. + + It is not necessary to define a portal group as there is a + built-in portal group called default. In + this case, the difference between default + and pg0 is that with + default, target discovery is always denied, + while with pg0, it is always + allowed. + + The second entry defines a single target. Target has two + possible meanings: a machine serving iSCSI + or a named group of LUNs. This example + uses the latter meaning, where + iqn.2012-06.com.example:target0 is the + target name. This target name is suitable for testing + purposes. For actual use, change + com.example to the real domain name, + reversed. The 2012-06 represents the year + and month of acquiring control of that domain name, and + target0 can be any value. Any number of + targets can be defined in this configuration file. + + The auth-group no-authentication line + allows all initiators to connect to the specified target and + portal-group pg0 makes the target reachable + through the pg0 portal group. + + The next section defines the LUN. 
To + the initiator, each LUN will be visible as + a separate disk device. Multiple LUNs can + be defined for each target. Each LUN is + identified by a number, where LUN 0 is + mandatory. The path /data/target0-0 line + defines the full path to a file or zvol backing the + LUN. That path must exist before starting + &man.ctld.8;. The second line is optional and specifies the + size of the LUN. + + Next, to make sure the &man.ctld.8; daemon is started at + boot, add this line to + /etc/rc.conf: + + ctld_enable="YES" + + To start &man.ctld.8; now, run this command: + + &prompt.root; service ctld start + + As the &man.ctld.8; daemon is started, it reads + /etc/ctl.conf. If this file is edited + after the daemon starts, use this command so that the changes + take effect immediately: + + &prompt.root; service ctld reload + + + Authentication + + The previous example is inherently insecure as it uses + no authentication, granting anyone full access to all + targets. To require a username and password to access + targets, modify the configuration as follows: + + auth-group ag0 { + chap username1 secretsecret + chap username2 anothersecret +} + +portal-group pg0 { + discovery-auth-group no-authentication + listen 0.0.0.0 + listen [::] +} + +target iqn.2012-06.com.example:target0 { + auth-group ag0 + portal-group pg0 + lun 0 { + path /data/target0-0 + size 4G + } +} + + The auth-group section defines + username and password pairs. An initiator trying to connect + to iqn.2012-06.com.example:target0 must + first specify a defined username and secret. However, + target discovery is still permitted without authentication. + To require target discovery authentication, set + discovery-auth-group to a defined + auth-group name instead of + no-authentication. + + It is common to define a single exported target for + every initiator. As a shorthand for the syntax above, the + username and password can be specified directly in the + target entry: + + target iqn.2012-06.com.example:target0 { + portal-group pg0 + chap username1 secretsecret + + lun 0 { + path /data/target0-0 + size 4G + } +} + + + + + Configuring an <acronym>iSCSI</acronym> Initiator + + + The iSCSI initiator described in this + section is supported starting with &os; 10.0-RELEASE. To + use the iSCSI initiator available in + older versions, refer to &man.iscontrol.8;. + + + The iSCSI initiator requires that the + &man.iscsid.8; daemon is running. This daemon does not use a + configuration file. To start it automatically at boot, add + this line to /etc/rc.conf: + + iscsid_enable="YES" + + To start &man.iscsid.8; now, run this command: + + &prompt.root; service iscsid start + + Connecting to a target can be done with or without an + /etc/iscsi.conf configuration file. This + section demonstrates both types of connections. + + + Connecting to a Target Without a Configuration + File + + To connect an initiator to a single target, specify the + IP address of the portal and the name of + the target: + + &prompt.root; iscsictl -A -p 10.10.10.10 -t iqn.2012-06.com.example:target0 + + To verify if the connection succeeded, run + iscsictl without any arguments. The + output should look similar to this: + + Target name Target portal State +iqn.2012-06.com.example:target0 10.10.10.10 Connected: da0 + + In this example, the iSCSI session + was successfully established, with + /dev/da0 representing the attached + LUN. 
If the + iqn.2012-06.com.example:target0 target + exports more than one LUN, multiple + device nodes will be shown in that section of the + output: + + Connected: da0 da1 da2. + + Any errors will be reported in the output, as well as + the system logs. For example, this message usually means + that the &man.iscsid.8; daemon is not running: + + Target name Target portal State +iqn.2012-06.com.example:target0 10.10.10.10 Waiting for iscsid(8) + + The following message suggests a networking problem, + such as a wrong IP address or + port: + + Target name Target portal State +iqn.2012-06.com.example:target0 10.10.10.11 Connection refused + + This message means that the specified target name is + wrong: + + Target name Target portal State +iqn.2012-06.com.example:atrget0 10.10.10.10 Not found + + This message means that the target requires + authentication: + + Target name Target portal State +iqn.2012-06.com.example:target0 10.10.10.10 Authentication failed + + To specify a CHAP username and + secret, use this syntax: + + &prompt.root; iscsictl -A -p 10.10.10.10 -t iqn.2012-06.com.example:target0 -u user -s secretsecret + + + + Connecting to a Target with a Configuration + File + + To connect using a configuration file, create + /etc/iscsi.conf with contents like + this: + + t0 { + TargetAddress = 10.10.10.10 + TargetName = iqn.2012-06.com.example:target0 + AuthMethod = CHAP + chapIName = user + chapSecret = secretsecret +} + + The t0 specifies a nickname for the + configuration file section. It will be used by the + initiator to specify which configuration to use. The other + lines specify the parameters to use during connection. The + TargetAddress and + TargetName are mandatory, whereas the + other options are optional. In this example, the + CHAP username and secret are + shown. + + To connect to the defined target, specify the + nickname: + + &prompt.root; iscsictl -An t0 + + Alternately, to connect to all targets defined in the + configuration file, use: + + &prompt.root; iscsictl -Aa + + To make the initiator automatically connect to all + targets in /etc/iscsi.conf, add the + following to /etc/rc.conf: + + iscsictl_enable="YES" +iscsictl_flags="-Aa" + + Index: zh_TW.UTF-8/books/handbook/ports/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/ports/chapter.xml +++ zh_TW.UTF-8/books/handbook/ports/chapter.xml @@ -1,9 +1,10 @@ 軟體套件管理篇:Packages 及 Ports 機制 @@ -300,6 +301,7 @@
+ - - 其他細節部份 - 所有已裝的 package 資訊都會存到 /var/db/pkg - 目錄內,在該目錄下可以找到記載已裝的軟體檔案清單及該軟體簡介的檔案。 - + + Using <application>pkg</application> for Binary Package + Management + + pkg is the next generation + replacement for the traditional &os; package management tools, + offering many features that make dealing with binary packages + faster and easier. + + pkg is not a replacement for + port management tools like + ports-mgmt/portmaster or + ports-mgmt/portupgrade. These tools can be + used to install third-party software from both binary packages + and the Ports Collection, while + pkg installs only binary + packages. + + + Getting Started with + <application>pkg</application> + + &os; 8.4 and later includes a bootstrap utility + which can be used to download and install + pkg, along with its manual + pages. + + To bootstrap the system, run: + + &prompt.root; /usr/sbin/pkg + + For earlier &os; versions, + pkg must instead be installed + from the Ports Collection or as a binary package. + + To install the port, run: + + &prompt.root; cd /usr/ports/ports-mgmt/pkg +&prompt.root; make +&prompt.root; make install clean + + When upgrading an existing system that originally used the + older package system, the database must be converted to the + new format, so that the new tools are aware of the already + installed packages. Once pkg has + been installed, the + package database must be converted from the traditional format + to the new format by running this command: + + &prompt.root; pkg2ng + + This step is not required for new installations that + do not yet have any third-party software + installed. + + + This step is not reversible. Once the package database + has been converted to the pkg + format, the traditional pkg_* tools + should no longer be used. + + + + The package database conversion may emit errors as the + contents are converted to the new version. Generally, these + errors can be safely ignored. However, a list of + third-party software that was not successfully converted + will be listed after pkg2ng has finished + and these applications must be manually reinstalled. + + + To ensure that the &os; Ports Collection registers + new software with pkg, and not + the traditional packages format, &os; versions earlier than + 10.X require this line in + /etc/make.conf: + + WITH_PKGNG= yes + + The pkg package management + system uses a package repository for most operations. The + default package repository location is defined in + /usr/local/etc/pkg.conf or by the + PACKAGESITE environment variable, which + overrides the configuration file. + + Additional pkg + configuration options are described in pkg.conf(5). + + Usage information for pkg is + available in pkg(8) or by running + pkg without additional arguments. + + Each pkg command argument is + documented in a command-specific manual page. To read the + manual page for pkg install, for example, + run either of these commands: + + &prompt.root; pkg help install + + &prompt.root; man pkg-install + + The rest of this section demonstrates common binary + package management tasks which can be performed using + pkg. Each demonstrated command + provides many switches to customize its use. Refer to a + command's help or man page for details and more + examples. + + + + Obtaining Information About Installed Packages + + Information about the packages installed on a system + can be viewed by running pkg info which, + when run without any switches, will list the package version + for either all installed packages or the specified + package. 
+ + For example, to see which version of + pkg is installed, run: + + &prompt.root; pkg info pkg +pkg-1.1.4_1 + + + + Installing and Removing Packages + + To install a binary package use the following command, + where packagename is the name of + the package to install: + + &prompt.root; pkg install packagename + + This command uses repository data to determine which + version of the software to install and if it has any + uninstalled dependencies. For example, to install + curl: + + &prompt.root; pkg install curl +Updating repository catalogue +/usr/local/tmp/All/curl-7.31.0_1.txz 100% of 1181 kB 1380 kBps 00m01s + +/usr/local/tmp/All/ca_root_nss-3.15.1_1.txz 100% of 288 kB 1700 kBps 00m00s + +Updating repository catalogue +The following 2 packages will be installed: + + Installing ca_root_nss: 3.15.1_1 + Installing curl: 7.31.0_1 + +The installation will require 3 MB more space + +0 B to be downloaded + +Proceed with installing packages [y/N]: y +Checking integrity... done +[1/2] Installing ca_root_nss-3.15.5_1... done +[2/2] Installing curl-7.31.0_1... done +Cleaning up cache files...Done + + The new package and any additional packages that were + installed as dependencies can be seen in the installed + packages list: + + &prompt.root; pkg info +ca_root_nss-3.15.5_1 The root certificate bundle from the Mozilla Project +curl-7.31.0_1 Non-interactive tool to get files from FTP, GOPHER, HTTP(S) servers +pkg-1.1.4_6 New generation package manager + + Packages that are no longer needed can be removed with + pkg delete. For example: + + &prompt.root; pkg delete curl +The following packages will be deleted: + + curl-7.31.0_1 + +The deletion will free 3 MB + +Proceed with deleting packages [y/N]: y +[1/1] Deleting curl-7.31.0_1... done + + + + Upgrading Installed Packages + + Packages that are outdated can be found with + pkg version. If a local ports tree + does not exist, pkg-version(8) will use the remote + repository catalogue. Otherwise, the local ports tree will + be used to identify package versions. + + Installed packages can be upgraded to their latest + versions by typing pkg upgrade. This + command will compare the installed versions with those + available in the repository catalogue. When finished, it + will list the applications that have newer versions. Type + y to proceed with the upgrade or + n to cancel the upgrade. + + + + Auditing Installed Packages + + Occasionally, software vulnerabilities may be discovered + in third-party applications. To address this, + pkg includes a built-in auditing + mechanism. To determine if there are any known + vulnerabilities for the software installed on the system, + run: + + &prompt.root; pkg audit -F + + + + Automatically Removing Leaf Dependencies + + Removing a package may leave behind dependencies which + are no longer required. Unneeded packages that were installed + as dependencies can be automatically detected and removed + using: + + &prompt.root; pkg autoremove +Packages to be autoremoved: + ca_root_nss-3.13.5 + +The autoremoval will free 723 kB + +Proceed with autoremoval of packages [y/N]: y +Deinstalling ca_root_nss-3.15.1_1... done + + + + Backing Up the Package Database + + Unlike the traditional package management system, + pkg includes its own package + database backup mechanism. 
To manually back up the contents + of the package database, run the following command, replacing + pkgng.db with a suitable file + name: + + &prompt.root; pkg backup -d pkgng.db + + Additionally, pkg includes + a &man.periodic.8; script to automatically perform a daily + back up of the package database. This functionality is + enabled if daily_backup_pkgdb_enable is + set to YES in &man.periodic.conf.5;. + + + To disable the periodic script from backing up the + package database, set + daily_backup_pkgdb_enable to + NO in &man.periodic.conf.5;. + + + To restore the contents of a previous package database + backup, run: + + &prompt.root; pkg backup -r /path/to/pkgng.db + + + + Removing Stale Packages + + By default, pkg stores + binary packages in a cache directory defined by + PKG_CACHEDIR in pkg.conf(5). When upgrading + packages with pkg upgrade, old versions + of the upgraded packages are not automatically removed. + + To remove these outdated binary packages, run: + + &prompt.root; pkg clean + + + + Modifying Package Metadata + + Software within the &os; Ports Collection can + undergo major version number changes. To address this, + pkg has a built-in command to + update package origins. This can be useful, for example, if + lang/php5 is renamed to + lang/php53 so that + lang/php5 can now + represent version 5.4. + + To change the package origin for the above example, + run: + + &prompt.root; pkg set -o lang/php5:lang/php53 + + As another example, to update + lang/ruby18 to + lang/ruby19, run: + + &prompt.root; pkg set -o lang/ruby18:lang/ruby19 + + As a final example, to change the origin of the + libglut shared libraries from + graphics/libglut to + graphics/freeglut, run: + + &prompt.root; pkg set -o graphics/libglut:graphics/freeglut + + + When changing package origins, it is important to + reinstall packages that are dependent on the package with + the modified origin. To force a reinstallation of dependent + packages, run: + + &prompt.root; pkg install -Rf graphics/freeglut + 使用 Ports 管理機制 - 下面我們會介紹如何使用 Ports Collection 來安裝、移除軟體的基本用法。 - 至於其他可用的 make 詳細用法與環境設定,可參閱 - &man.ports.7;。 + The Ports Collection is a set of + Makefiles, patches, and description files + stored in /usr/ports. This set of files is + used to compile and install applications on &os;. Before an + application can be compiled using a port, the Ports Collection + must first be installed. If it was not installed during the + installation of &os;, use one of the following methods to + install it: + + + Portsnap 方式 + + The base system of &os; includes + Portsnap. This is a fast and + user-friendly tool for retrieving the Ports Collection and + is the recommended choice for most users. This utility + connects to a &os; site, verifies the secure key, and + downloads a new copy of the Ports Collection. The key is used + to verify the integrity of all downloaded files. 
- - 記得安裝 Ports Collection + + To download a compressed snapshot of the Ports + Collection into + /var/db/portsnap: - 在安裝任一 ports 之前,必須先裝上 - Ports Collection —— 它主要是由 /usr/ports 內一堆 - Makefiles, patches 以及一些軟體簡介檔所組成的。 - + &prompt.root; portsnap fetch + - 在裝 FreeBSD 時,若忘了在 sysinstall - 內勾選要裝 Ports Collection 的話, - 沒關係,可以照下列方式來安裝 ports collection: - - - CVSup 方式 - - 使用 CVSup 是安裝、更新 Ports - Collection 的快速方法之一。 - 若想更瞭解 CVSup 用法的話,請參閱 使用 CVSup。 + + 若是第一次跑 Portsnap 的話, + 則需要先解壓到 /usr/ports - - csup 是以 C 語言對 - CVSup 軟體的重寫,在 &os; 6.2 - 及之後版本即有附在系統內。 可以直接用系統所附的 - csup 即可跳過步驟一的動作, - 並將本文相關提到 cvsup 之處, - 都改為 csup 即可。 此外, &os; 6.2 - 之前的版本,則可裝 net/csup - 或者 package 來使用 csup - + &prompt.root; portsnap extract + - 第一次跑 CVSup 之前,請先確認 - /usr/ports - 是乾淨的! 若你已經裝了 Ports Collection ,但又自行加上其他 patch - 檔,那麼 CVSup - 並不會刪除你自行加上的 patch 檔,這樣可能會導致要安裝某些軟體時, - 發生 patch 失敗或編譯失敗。 - - - 安裝 net/cvsup-without-gui - package: - - &prompt.root; pkg_add -r cvsup-without-gui - - 細節用法請參閱 安裝 CVSup()。 - - - - 執行 cvsup - - &prompt.root; cvsup -L 2 -h cvsup.tw.FreeBSD.org /usr/share/examples/cvsup/ports-supfile - - 請把 - cvsup.tw.FreeBSD.org 請改成離你比較近 - (快)的 CVSup 主機。 - 這部分可以參閱完整的 CVSup mirror - 站列表()。 + + After the first use of + Portsnap has been completed as + shown above, /usr/ports can be updated + as needed by running: + + &prompt.root; portsnap fetch +&prompt.root; portsnap update + + When using fetch, the + extract or the update + operation may be run consecutively, like so: - - 若想改用自己設的 - ports-supfile,比如說, - 不想每次都得打指令來指定所使用的 - CVSup 主機。 - - - - 這種情況下,請以 root 權限把 - /usr/share/examples/cvsup/ports-supfile - 複製到其他位置,比如 - /root 或者自己帳號的家目錄。 - - - - 修改新的 ports-supfile 檔。 - - - - 把 - CHANGE_THIS.FreeBSD.org - 改為離你比較近(快)的 CVSup 主機。 - 這部分可以參閱完整的 CVSup - Mirrors () 站列表 - - - - 然後就開始以類似下列指令跑 cvsup: - - - &prompt.root; cvsup -L 2 /root/ports-supfile - - - - + &prompt.root; portsnap fetch update + + - - 執行 &man.cvsup.1; 之後,就會開始更新 Ports Collection。 - 不過這動作只是『更新』並不是『升級』,不會把已裝的軟體重新編譯、升級。 - - - - - Portsnap 方式 - - &man.portsnap.8; 也是更新 Ports Collection 的方式之一。 - &os; 6.0 起開始內建 Portsnap 機制,而較舊的系統,則可透過 - ports-mgmt/portsnap port 來安裝: - + + Subversion Method - &prompt.root; pkg_add -r portsnap + If more control over the ports tree is needed or if local + changes need to be maintained, + Subversion can be used to obtain + the Ports Collection. Refer to the + Subversion Primer for a detailed description of + Subversion. - Portsnap 細節功能,請參閱 - Portsnap 使用篇。 + + Subversion must be installed + before it can be used to check out the ports tree. If a + copy of the ports tree is already present, install + Subversion like this: - - /usr/ports 目錄不存在的話, - 就建立一下吧: + &prompt.root; cd /usr/ports/devel/subversion +&prompt.root; make install clean - &prompt.root; mkdir /usr/ports - + If the ports tree is not available, or + pkg is being used to manage + packages, Subversion can be + installed as a package: - - 接下來,下載壓縮的 Ports Collection 定期更新檔到 - /var/db/portsnap 目錄。 - 完成下載後,要斷線與否都可以。 + &prompt.root; pkg install subversion - &prompt.root; portsnap fetch - + - - 若是第一次跑 Portsnap 的話, - 則需要先解壓到 /usr/ports: - + + Check out a copy of the ports tree. 
For better + performance, replace + svn0.us-east.FreeBSD.org with a + Subversion + mirror close to your geographic location: - &prompt.root; portsnap extract + &prompt.root; svn checkout https://svn0.us-east.FreeBSD.org/ports/head /usr/ports + - 若已有 /usr/ports 而且只是想更新而已, - 那麼就照下面作: + + As needed, update /usr/ports after + the initial Subversion + checkout: - &prompt.root; portsnap update - + &prompt.root; svn update /usr/ports + + - - - - Sysinstall 方式 - - 這方式要用 sysinstall - 透過安裝來源來裝 Ports Collection。 - 請注意:所安裝的 Ports Collection 版本只是該 release - 發佈時的版本而已,而非最新。 - 若能上網(Internet)的話,請使用上述方式之一會比較好。 - - - root 權限執行 - sysinstall - (在 &os; 5.2 之前版本則是 /stand/sysinstall) - ,方式如下: - - &prompt.root; sysinstall - - - - 請以方向鍵移動選擇項目,選擇 - Configure,然後按 - Enter 鍵。 - - - - 選擇 - Distributions,然後按 - Enter 鍵。 - - - - 選擇 ports,然後按 - Space 鍵。 - - - - Exit,然後按 - Enter 鍵。 - - - - 選擇要用的安裝來源,比如:CDROM(光碟)、FTP 等方式。 - - - - Exit,然後按 - Enter 鍵。 - - - - 按下 X 鍵就可離開 - sysinstall 程式。 - - - + The Ports Collection installs a series of directories + representing software categories with each category having + a subdirectory for each application. Each subdirectory, also + referred to as a ports skeleton, contains a set of files that + tell &os; how to compile and install that program. Each port + skeleton includes these files and directories: + + + + Makefile: contains statements that + specify how the application should be compiled and where + its components should be installed. + + + + distinfo: contains the names and + checksums of the files that must be downloaded to build the + port. + + + + files/: this directory contains + any patches needed for the program to compile and install + on &os;. This directory may also contain other files used + to build the port. + + + + pkg-descr: provides a more detailed + description of the program. + + + + pkg-plist: a list of all the + files that will be installed by the port. It also tells + the ports system which files to remove upon + deinstallation. + + + + Some ports include pkg-message or + other files to handle special situations. For more details + on these files, and on ports in general, refer to the &os; + Porter's Handbook. + + The port does not include the actual source code, also + known as a distfile. The extract portion + of building a port will automatically save the downloaded + source to /usr/ports/distfiles. Ports 的安裝方式 - ports - installing + ports + installing + + 下面我們會介紹如何使用 Ports Collection 來安裝、移除軟體的基本用法。 + 至於其他可用的 make 詳細用法與環境設定,可參閱 + &man.ports.7;。 + + + Before compiling any port, be sure to update the Ports + Collection as described in the previous section. Since + the installation of any third-party software can introduce + security vulnerabilities, it is recommended to first check + http://vuxml.freebsd.org/ + for known security issues related to the port. Alternately, + if ports-mgmt/portaudit is installed, run + portaudit -F before installing a new + port. This command can be configured to automatically + perform a security audit and an update of the vulnerability + database during the daily security system check. For more + information, refer to the manual page for + portaudit and + &man.periodic.8;. 
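	As an illustrative example, assuming
	ports-mgmt/portaudit is already installed, the
	vulnerability database can be fetched and every installed
	package audited in a single invocation:

	&prompt.root; portaudit -Fda

	Here, -F fetches a fresh copy of the vulnerability database,
	-d prints the creation date of that database, and -a prints a
	report of known vulnerabilities for all installed packages.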
+ + 提到 Ports Collection,首先要先說明的是:何謂 skeleton。 簡單來講,port skeleton 就是讓軟體如何在 FreeBSD @@ -1016,7 +1304,7 @@ 比如裝了一個 port 後才意識到裝錯 port 了。 在此,我們將移除前面例子所裝的那個 port (沒仔細注意的話,我們再提醒一下就是 lsof)。 - 跟移除 package 時相當類似(在 Packages section 有介紹),都是使用 + 跟移除 package 時相當類似,都是使用 &man.pkg.delete.1; 指令: &prompt.root; pkg_delete lsof-4.57 Index: zh_TW.UTF-8/books/handbook/preface/preface.xml =================================================================== --- zh_TW.UTF-8/books/handbook/preface/preface.xml +++ zh_TW.UTF-8/books/handbook/preface/preface.xml @@ -1,7 +1,7 @@ @@ -42,11 +42,6 @@ - Vinum是本版所新增的章節。本章介紹:如何運用 Vinum 這種邏輯磁碟(device-independent) - ,以及軟體 RAID-0, RAID-1 和 RAID-5 。 - - - PPP 及 SLIP 一章中增加了故障排除的說明。 @@ -244,83 +239,136 @@ - , Configuration and Tuning + + Describes the parameters available for system - administrators to tune a FreeBSD system for optimum - performance. Also describes the various configuration files - used in FreeBSD and where to find them. + administrators to tune a &os; system for optimum + performance. Also describes the various configuration files + used in &os; and where to find them. + + + + + + + + Describes the &os; boot process and explains how to + control this process with configuration options. + - , Booting Process + + - Describes the FreeBSD boot process and explains - how to control this process with configuration options. + Describes many different tools available to help keep + your &os; system secure, including Kerberos, IPsec and + OpenSSH. + - , Users and Basic Account - Management + + - Describes the creation and manipulation of user - accounts. Also discusses resource limitations that can be - set on users and other account management tasks. + Describes the jails framework, and the improvements of + jails over the traditional chroot support of &os;. + - , Security + + - Describes many different tools available to help keep your - FreeBSD system secure, including Kerberos, IPsec and OpenSSH. + Explains what Mandatory Access Control (MAC) is and + how this mechanism can be used to secure a &os; + system. + - , Mandatory Access Control + + - Explains what Mandatory Access Control (MAC) is and how this - mechanism can be used to secure a FreeBSD system. + Describes what &os; Event Auditing is, how it can be + installed, configured, and how audit trails can be inspected + or monitored. + - , Storage + + Describes how to manage storage media and filesystems - with FreeBSD. This includes physical disks, RAID arrays, - optical and tape media, memory-backed disks, and network - filesystems. + with &os;. This includes physical disks, RAID arrays, + optical and tape media, memory-backed disks, and network + filesystems. + - , GEOM + + - Describes what the GEOM framework in FreeBSD is and how + Describes what the GEOM framework in &os; is and how to configure various supported RAID levels. + + + + + + Examines support of non-native file systems in &os;, + like the Z File System from &sun;. + + + + + + + + Describes what virtualization systems offer, and how + they can be used with &os;. + + + - , Vinum + + - Describes how to use Vinum, a logical volume manager - which provides device-independent logical disks, and - software RAID-0, RAID-1 and RAID-5. + Describes how to use &os; in languages other than + English. Covers both system and application level + localization. + - , Localization + + - Describes how to use FreeBSD in languages other than - English. Covers both system and application level - localization. 
+ Explains the differences between &os;-STABLE, + &os;-CURRENT, and &os; releases. Describes which users + would benefit from tracking a development system and + outlines that process. Covers the methods users may take + to update their system to the latest security + release. + - , The Cutting Edge + + - Explains the differences between FreeBSD-STABLE, - FreeBSD-CURRENT, and FreeBSD releases. Describes which users - would benefit from tracking a development system and outlines - that process. + Describes how to configure and use the &dtrace; tool + from &sun; in &os;. Dynamic tracing can help locate + performance issues, by performing real time system + analysis. Index: zh_TW.UTF-8/books/handbook/security/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/security/chapter.xml +++ zh_TW.UTF-8/books/handbook/security/chapter.xml @@ -1,18 +1,28 @@ - - 系統安全 + + + + 系統安全 + - MatthewDillonMuch of this chapter has been taken from the - security(7) manual page by + + + Tom + Rhodes + + Rewritten by + - security @@ -96,1063 +106,619 @@ - 介紹 - - 安全,對系統管理者而言,是至始至終最基本的要求。由於所有的 BSD &unix; multi-user - 系統都提供了與生俱來的基本安全,所以建立、維護額外的安全機制,以確保使用者的『可靠』, - 可能也就是系統管理員最需要慎思的艱巨任務了。機器的安全性取決於您所建立的安全措施, - 而許多安全方面的考量,則會與人們使用電腦時的便利相矛盾。一般來說, &unix; - 系統可同時執行許多數目的程式 process ,並且其中許多 process 也同時以 Server 端來運作。 - ── 這意味著,外部實體機器能夠與它們互相連接,並產生互動。現在的一般桌機, - 已經能夠達到以前小型主機甚至大型主機的性能,而隨著這些電腦的網路連接和在更大範圍內互相連接 - ,安全也成為了一個日益嚴峻的課題。 - - 安全最好的方式,是能夠透過像『洋蔥』那樣的層層防護模式。 - 簡單講,應該儘可能的建立多層次安全防護,並小心地監視各類針對系統的入侵疑點。 - You do not want to - overbuild your security or you will interfere with the detection - side, and detection is one of the single most important aspects of - any security mechanism. For example, it makes little sense to set - the schg flag (see &man.chflags.1;) on every - system binary because - while this may temporarily protect the binaries, it prevents an - attacker who has broken in from making an easily detectable change - that may result in your security mechanisms not detecting the attacker - at all. - - System security also pertains to dealing with various forms of - attack, including attacks that attempt to crash, or otherwise make a - system unusable, but do not attempt to compromise the - root account (break root). - Security concerns - can be split up into several categories: - - - - 服務阻斷攻擊(DoS) - - - - 竊取其他使用者的帳號。 - - - - 透過各式 Server 上所提供的 Service 來竊取 root 帳號。 - - - - 透過使用者帳號竊取 root 帳號。 - - - - 開後門。 - - - - - DoS attacks - Denial of Service (DoS) - - - security - DoS attacks - Denial of Service (DoS) - - Denial of Service (DoS) - - A denial of service attack is an action that deprives the - machine of needed resources. Typically, DoS attacks are - brute-force mechanisms that attempt to crash or otherwise make a - machine unusable by overwhelming its servers or network stack. Some - DoS attacks try to take advantage of bugs in the networking - stack to crash a machine with a single packet. The latter can only - be fixed by applying a bug fix to the kernel. Attacks on servers - can often be fixed by properly specifying options to limit the load - the servers incur on the system under adverse conditions. - Brute-force network attacks are harder to deal with. A - spoofed-packet attack, for example, is nearly impossible to stop, - short of cutting your system off from the Internet. It may not be - able to take your machine down, but it can saturate your - Internet connection. 
- - - security - account compromises - - - A user account compromise is even more common than a DoS - attack. Many sysadmins still run standard - telnetd, rlogind, - rshd, - and ftpd servers on their machines. - These servers, by default, do - not operate over encrypted connections. The result is that if you - have any moderate-sized user base, one or more of your users logging - into your system from a remote location (which is the most common - and convenient way to login to a system) will have his or her - password sniffed. The attentive system admin will analyze his - remote access logs looking for suspicious source addresses even for - successful logins. - - One must always assume that once an attacker has access to a - user account, the attacker can break root. - However, the reality is that in a well secured and maintained system, - access to a user account does not necessarily give the attacker - access to root. The distinction is important - because without access to root the attacker - cannot generally hide his tracks and may, at best, be able to do - nothing more than mess with the user's files, or crash the machine. - User account compromises are very common because users tend not to - take the precautions that sysadmins take. - - - security - backdoors - + Introduction - System administrators must keep in mind that there are - potentially many ways to break root on a machine. - The attacker may know the root password, - the attacker may find a bug in a root-run server and be able - to break root over a network - connection to that server, or the attacker may know of a bug in - a suid-root program that allows the attacker to break - root once he has broken into a user's account. - If an attacker has found a way to break root - on a machine, the attacker may not have a need - to install a backdoor. Many of the root holes - found and closed to date involve a considerable amount of work - by the attacker to cleanup after himself, so most attackers install - backdoors. A backdoor provides the attacker with a way to easily - regain root access to the system, but it - also gives the smart system administrator a convenient way - to detect the intrusion. - Making it impossible for an attacker to install a backdoor may - actually be detrimental to your security, because it will not - close off the hole the attacker found to break in the first - place. - - - Security remedies should always be implemented with a - multi-layered onion peel approach and can be - categorized as follows: + Security is everyone's responsibility. A weak entry point + in any system could allow intruders to gain access to critical + information and cause havoc on an entire network. One of the + core principles of information security is the + CIA triad, which stands for the + Confidentiality, Integrity, and Availability of information + systems. + + The CIA triad is a bedrock concept of + computer security as customers and users expect their data to be + protected. For example, a customer expects that their credit + card information is securely stored (confidentiality), that + their orders are not changed behind the scenes (integrity), and + that they have access to their order information at all times + (availablility). + + To provide CIA, security professionals + apply a defense in depth strategy. The idea of defense in depth + is to add several layers of security to prevent one single layer + failing and the entire security system collapsing. 
For example, + a system administrator cannot simply turn on a firewall and + consider the network or system secure. One must also audit + accounts, check the integrity of binaries, and ensure malicious + tools are not installed. To implement an effective security + strategy, one must understand threats and how to defend against + them. + + What is a threat as it pertains to computer security? + Threats are not limited to remote attackers who attempt to + access a system without permission from a remote location. + Threats also include employees, malicious software, unauthorized + network devices, natural disasters, security vulnerabilities, + and even competing corporations. + + Systems and networks can be accessed without permission, + sometimes by accident, or by remote attackers, and in some + cases, via corporate espionage or former employees. As a user, + it is important to prepare for and admit when a mistake has lead + to a security breach and report possible issues to the security + team. As an administrator, it is important to know of the + threats and be prepared to mitigate them. + + When applying security to systems, it is recommended to + start by securing the basic accounts and system configuration, + and then to secure the network layer so that it adheres to the + system policy and the organization's security procedures. Many + organizations already have a security policy that covers the + configuration of technology devices. The policy should include + the security configuration of workstations, desktops, mobile + devices, phones, production servers, and development servers. + In many cases, standard operating procedures + (SOPs) already exist. When in doubt, ask the + security team. + + The rest of this introduction describes how some of these + basic security configurations are performed on a &os; system. + The rest of this chapter describes some specific tools which can + be used when implementing a security policy on a &os; + system. + + + Preventing Logins + + In securing a system, a good starting point is an audit of + accounts. Ensure that root has a strong password and + that this password is not shared. Disable any accounts that + do not need login access. + + To deny login access to accounts, two methods exist. The + first is to lock the account. This example locks the + toor account: + + &prompt.root; pw lock toor + + The second method is to prevent login access by changing + the shell to /sbin/nologin. Only the + superuser can change the shell for other users: + + &prompt.root; chsh -s /usr/sbin/nologin toor + + The /usr/sbin/nologin shell prevents + the system from assigning a shell to the user when they + attempt to login. + + + + Permitted Account Escalation + + In some cases, system administration needs to be shared + with other users. &os; has two methods to handle this. The + first one, which is not recommended, is a shared root password + used by members of the wheel group. With this + method, a user types su and enters the + password for wheel + whenever superuser access is needed. The user should then + type exit to leave privileged access after + finishing the commands that required administrative access. + To add a user to this group, edit + /etc/group and add the user to the end of + the wheel entry. The user must be + separated by a comma character with no space. + + The second, and recommended, method to permit privilege + escalation is to install the security/sudo + package or port. 
This software provides additional auditing, + more fine-grained user control, and can be configured to lock + users into running only the specified privileged + commands. + + After installation, use visudo to edit + /usr/local/etc/sudoers. This example + creates a new webadmin group, adds the + trhodes account to + that group, and configures that group access to restart + apache24: + + &prompt.root; pw groupadd webadmin -M trhodes -g 6000 +&prompt.root; visudo +%webadmin ALL=(ALL) /usr/sbin/service apache24 * + + + + Password Hashes + + Passwords are a necessary evil of technology. When they + must be used, they should be complex and a powerful hash + mechanism should be used to encrypt the version that is stored + in the password database. &os; supports the + DES, MD5, + SHA256, SHA512, and + Blowfish hash algorithms in its crypt() + library. The default of SHA512 should not + be changed to a less secure hashing algorithm, but can be + changed to the more secure Blowfish algorithm. - - - Securing root and staff accounts. - + + Blowfish is not part of AES and is + not considered compliant with any Federal Information + Processing Standards (FIPS). Its use may + not be permitted in some environments. + - - Securing root–run servers - and suid/sgid binaries. - + To determine which hash algorithm is used to encrypt a + user's password, the superuser can view the hash for the user + in the &os; password database. Each hash starts with a symbol + which indicates the type of hash mechanism used to encrypt the + password. If DES is used, there is no + beginning symbol. For MD5, the symbol is + $. For SHA256 and + SHA512, the symbol is + $6$. For Blowfish, the symbol is + $2a$. In this example, the password for + dru is hashed using + the default SHA512 algorithm as the hash + starts with $6$. Note that the encrypted + hash, not the password itself, is stored in the password + database: + + &prompt.root; grep dru /etc/master.passwd +dru:$6$pzIjSvCAn.PBYQBA$PXpSeWPx3g5kscj3IMiM7tUEUSPmGexxta.8Lt9TGSi2lNQqYGKszsBPuGME0:1001:1001::0:0:dru:/usr/home/dru:/bin/csh + + The hash mechanism is set in the user's login class. For + this example, the user is in the default + login class and the hash algorithm is set with this line in + /etc/login.conf: + + :passwd_format=sha512:\ + + To change the algorithm to Blowfish, modify that line to + look like this: + + :passwd_format=blf:\ + + Then run cap_mkdb /etc/login.conf as + described in . Note that this + change will not affect any existing password hashes. This + means that all passwords should be re-hashed by asking users + to run passwd in order to change their + password. + + For remote logins, two-factor authentication should be + used. An example of two-factor authentication is + something you have, such as a key, and + something you know, such as the passphrase for + that key. Since OpenSSH is part of + the &os; base system, all network logins should be over an + encrypted connection and use key-based authentication instead + of passwords. For more information, refer to . Kerberos users may need to make + additional changes to implement + OpenSSH in their network. These + changes are described in . + + + + Password Policy Enforcement + + Enforcing a strong password policy for local accounts is a + fundamental aspect of system security. In &os;, password + length, password strength, and password complexity can be + implemented using built-in Pluggable Authentication Modules + (PAM). 
+ + This section demonstrates how to configure the minimum and + maximum password length and the enforcement of mixed + characters using the pam_passwdqc.so + module. This module is enforced when a user changes their + password. + + To configure this module, become the superuser and + uncomment the line containing + pam_passwdqc.so in + /etc/pam.d/passwd. Then, edit that line + to match the password policy: + + password requisite pam_passwdqc.so min=disabled,disabled,disabled,12,10 similar=deny retry=3 enforce=users + + This example sets several requirements for new passwords. + The min setting controls the minimum + password length. It has five values because this module + defines five different types of passwords based on their + complexity. Complexity is defined by the type of characters + that must exist in a password, such as letters, numbers, + symbols, and case. The types of passwords are described in + &man.pam.passwdqc.8;. In this example, the first three types + of passwords are disabled, meaning that passwords that meet + those complexity requirements will not be accepted, regardless + of their length. The 12 sets a minimum + password policy of at least twelve characters, if the password + also contains characters with three types of complexity. The + 10 sets the password policy to also allow + passwords of at least ten characters, if the password contains + characters with four types of complexity. + + The similar setting denies passwords + that are similar to the user's previous password. The + retry setting provides a user with three + opportunities to enter a new password. - - Securing user accounts. - + Once this file is saved, a user changing their password + will see a message similar to the following: - - Securing the password file. - + &prompt.user; passwd +Changing local password for trhodes +Old Password: - - Securing the kernel core, raw devices, and - file systems. - +You can now choose the new password. +A valid password should be a mix of upper and lower case letters, +digits and other characters. You can use a 12 character long +password with characters from at least 3 of these 4 classes, or +a 10 character long password containing characters from all the +classes. Characters that form a common pattern are discarded by +the check. +Alternatively, if noone else can see your terminal now, you can +pick this as your password: "trait-useful&knob". +Enter new password: + + If a password that does not match the policy is entered, + it will be rejected with a warning and the user will have an + opportunity to try again, up to the configured number of + retries. + + Most password policies require passwords to expire after + so many days. To set a password age time in &os;, set + for the user's login class in + /etc/login.conf. The + default login class contains an + example: + + # :passwordtime=90d:\ + + So, to set an expiry of 90 days for this login class, + remove the comment symbol (#), save the + edit, and run cap_mkdb + /etc/login.conf. + + To set the expiration on individual users, pass an + expiration date or the number of days to expiry and a username + to pw: + + &prompt.root; pw usermod -p 30-apr-2015 -n trhodes + + As seen here, an expiration date is set in the form of + day, month, and year. For more information, see + &man.pw.8;. + + + + Detecting Rootkits + + A rootkit is any unauthorized + software that attempts to gain root access to a system. Once + installed, this malicious software will normally open up + another avenue of entry for an attacker. 
Realistically, once + a system has been compromised by a rootkit and an + investigation has been performed, the system should be + reinstalled from scratch. There is tremendous risk that even + the most prudent security or systems engineer will miss + something an attacker left behind. + + A rootkit does do one thing useful for administrators: once + detected, it is a sign that a compromise happened at some + point. But, these types of applications tend to be very well + hidden. This section demonstrates a tool that can be used to + detect rootkits, security/rkhunter. + + After installation of this package or port, the system may + be checked using the following command. It will produce a lot + of information and will require some manual pressing of + ENTER: + + &prompt.root; rkhunter -c + + After the process completes, a status message will be + printed to the screen. This message will include the number + of files checked, suspect files, possible rootkits, and more. + During the check, some generic security warnings may + be produced about hidden files, the + OpenSSH protocol selection, and + known vulnerable versions of installed software. These can be + handled now or after a more detailed analysis has been + performed. + + Every administrator should know what is running on the + systems they are responsible for. Third-party tools like + rkhunter and + sysutils/lsof, and native commands such + as netstat and ps, can + show a great deal of information on the system. Take notes on + what is normal, ask questions when something seems out of + place, and be paranoid. While preventing a compromise is + ideal, detecting a compromise is a must. + + + + Binary Verification + + Verification of system files and binaries is important + because it provides the system administration and security + teams information about system changes. A software + application that monitors the system for changes is called an + Intrusion Detection System (IDS). + + &os; provides native support for a basic + IDS system. While the nightly security + emails will notify an administrator of changes, the + information is stored locally and there is a chance that a + malicious user could modify this information in order to hide + their changes to the system. As such, it is recommended to + create a separate set of binary signatures and store them on a + read-only, root-owned directory or, preferably, on a removable + USB disk or remote + rsync server. + + The built-in mtree utility can be used + to generate a specification of the contents of a directory. A + seed, or a numeric constant, is used to generate the + specification and is required to check that the specification + has not changed. This makes it possible to determine if a + file or binary has been modified. Since the seed value is + unknown by an attacker, faking or checking the checksum values + of files will be difficult to impossible. The following + example generates a set of SHA256 hashes, + one for each system binary in /bin, and + saves those values to a hidden file in root's home directory, + /root/.bin_chksum_mtree: + + &prompt.root; mtree -s 3483151339707503 -c -K cksum,sha256digest -p /bin > /root/.bin_chksum_mtree +&prompt.root; mtree: /bin checksum: 3427012225 + + The 3483151339707503 represents + the seed. This value should be remembered, but not + shared. + + Viewing /root/.bin_chksum_mtree should + yield output similar to the following: + + # user: root +# machine: dreadnaught +# tree: /bin +# date: Mon Feb 3 10:19:53 2014 + +# .
+/set type=file uid=0 gid=0 mode=0555 nlink=1 flags=none +. type=dir mode=0755 nlink=2 size=1024 \ + time=1380277977.000000000 + \133 nlink=2 size=11704 time=1380277977.000000000 \ + cksum=484492447 \ + sha256digest=6207490fbdb5ed1904441fbfa941279055c3e24d3a4049aeb45094596400662a + cat size=12096 time=1380277975.000000000 cksum=3909216944 \ + sha256digest=65ea347b9418760b247ab10244f47a7ca2a569c9836d77f074e7a306900c1e69 + chflags size=8168 time=1380277975.000000000 cksum=3949425175 \ + sha256digest=c99eb6fc1c92cac335c08be004a0a5b4c24a0c0ef3712017b12c89a978b2dac3 + chio size=18520 time=1380277975.000000000 cksum=2208263309 \ + sha256digest=ddf7c8cb92a58750a675328345560d8cc7fe14fb3ccd3690c34954cbe69fc964 + chmod size=8640 time=1380277975.000000000 cksum=2214429708 \ + sha256digest=a435972263bf814ad8df082c0752aa2a7bdd8b74ff01431ccbd52ed1e490bbe7 + + The machine's hostname, the date and time the + specification was created, and the name of the user who + created the specification are included in this report. There + is a checksum, size, time, and SHA256 + digest for each binary in the directory. + + To verify that the binary signatures have not changed, + compare the current contents of the directory to the + previously generated specification, and save the results to a + file. This command requires the seed that was used to + generate the original specification: + + &prompt.root; mtree -s 3483151339707503 -p /bin < /root/.bin_chksum_mtree >> /root/.bin_chksum_output +&prompt.root; mtree: /bin checksum: 3427012225 + + This should produce the same checksum for + /bin that was produced when the + specification was created. If no changes have occurred to the + binaries in this directory, the + /root/.bin_chksum_output output file will + be empty. To simulate a change, change the date on + /bin/cat using touch + and run the verification command again: + + &prompt.root; touch /bin/cat +&prompt.root; mtree -s 3483151339707503 -p /bin < /root/.bin_chksum_mtree >> /root/.bin_chksum_output +&prompt.root; more /root/.bin_chksum_output +cat changed + modification time expected Fri Sep 27 06:32:55 2013 found Mon Feb 3 10:28:43 2014 + + It is recommended to create specifications for the + directories which contain binaries and configuration files, as + well as any directories containing sensitive data. Typically, + specifications are created for /bin, + /sbin, /usr/bin, + /usr/sbin, + /usr/local/bin, + /etc, and + /usr/local/etc. + + More advanced IDS systems exist, such + as security/aide. In most cases, + mtree provides the functionality + administrators need. It is important to keep the seed value + and the checksum output hidden from malicious users. More + information about mtree can be found in + &man.mtree.8;. + + + + System Tuning for Security + + In &os;, many system features can be tuned using + sysctl. A few of the security features + which can be tuned to prevent Denial of Service + (DoS) attacks will be covered in this + section. More information about using + sysctl, including how to temporarily change + values and how to make the changes permanent after testing, + can be found in . - - Quick detection of inappropriate changes made to the + + Any time a setting is changed with + sysctl, the chance to cause undesired + harm is increased, affecting the availability of the system. + All changes should be monitored and, if possible, tried on a + testing system before being used on a production system. - - - - Paranoia. 
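+
+      As a minimal illustration of the mechanics, using one of the
+      values recommended later in this section, a tunable can first
+      be tested at runtime:
+
+      &prompt.root; sysctl net.inet.tcp.blackhole=2
+net.inet.tcp.blackhole: 0 -> 2
+
+      Once the new value has been verified, the same assignment can
+      be added to /etc/sysctl.conf so that it
+      is applied again at boot:
+
+      net.inet.tcp.blackhole=2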
- - - - The next section of this chapter will cover the above bullet - items in greater depth. - - - - &os; 的系統安全 - - security - securing &os; - - - - Command vs. Protocol - Throughout this document, we will use - bold text to refer to an - application, and a monospaced font to refer - to specific commands. Protocols will use a normal font. This - typographical distinction is useful for instances such as ssh, - since it is - a protocol as well as command. - - - The sections that follow will cover the methods of securing your - &os; system that were mentioned in the last section of this chapter. - - - Securing the <systemitem class="username">root</systemitem> Account and - Staff Accounts - - su - - - First off, do not bother securing staff accounts if you have - not secured the root account. - Most systems have a password assigned to the root - account. The first thing you do is assume - that the password is always compromised. - This does not mean that you should remove the password. The - password is almost always necessary for console access to the - machine. What it does mean is that you should not make it - possible to use the password outside of the console or possibly - even with the &man.su.1; command. For example, make sure that - your ptys are specified as being insecure in the - /etc/ttys file so that direct - root logins - via telnet or rlogin are - disallowed. If using other login services such as - sshd, make sure that direct - root logins are disabled there as well. - You can do this by editing - your /etc/ssh/sshd_config file, and making - sure that PermitRootLogin is set to - NO. Consider every access method — - services such as FTP often fall through the cracks. - Direct root logins should only be allowed - via the system console. - - wheel - - - Of course, as a sysadmin you have to be able to get to - root, so we open up a few holes. - But we make sure these holes require additional password - verification to operate. One way to make root - accessible is to add appropriate staff accounts to the - wheel group (in - /etc/group). The staff members placed in the - wheel group are allowed to - su to root. - You should never give staff - members native wheel access by putting them in the - wheel group in their password entry. Staff - accounts should be placed in a staff group, and - then added to the wheel group via the - /etc/group file. Only those staff members - who actually need to have root access - should be placed in the - wheel group. It is also possible, when using - an authentication method such as Kerberos, to use Kerberos' - .k5login file in the root - account to allow a &man.ksu.1; to root - without having to place anyone at all in the - wheel group. This may be the better solution - since the wheel mechanism still allows an - intruder to break root if the intruder - has gotten hold of your - password file and can break into a staff account. While having - the wheel mechanism is better than having - nothing at all, it is not necessarily the safest option. - - - - An indirect way to secure staff accounts, and ultimately - root access is to use an alternative - login access method and - do what is known as starring out the encrypted - password for the staff accounts. Using the &man.vipw.8; - command, one can replace each instance of an encrypted password - with a single * character. - This command will update the /etc/master.passwd - file and user/password database to disable password-authenticated - logins. 
- - A staff account entry such as: - - foobar:R9DT/Fa1/LV9U:1000:1000::0:0:Foo Bar:/home/foobar:/usr/local/bin/tcsh - - Should be changed to this: - - foobar:*:1000:1000::0:0:Foo Bar:/home/foobar:/usr/local/bin/tcsh - - This change will prevent normal logins from occurring, - since the encrypted password will never match - *. With this done, - staff members must use - another mechanism to authenticate themselves such as - &man.kerberos.1; or &man.ssh.1; using a public/private key - pair. When using something like Kerberos, one generally must - secure the machines which run the Kerberos servers and your - desktop workstation. When using a public/private key pair - with ssh, one must generally secure - the machine used to login from (typically - one's workstation). An additional layer of protection can be - added to the key pair by password protecting the key pair when - creating it with &man.ssh-keygen.1;. Being able to - star out the passwords for staff accounts also - guarantees that staff members can only login through secure - access methods that you have set up. This forces all staff - members to use secure, encrypted connections for all of their - sessions, which closes an important hole used by many - intruders: sniffing the network from an unrelated, - less secure machine. - - The more indirect security mechanisms also assume that you are - logging in from a more restrictive server to a less restrictive - server. For example, if your main box is running all sorts of - servers, your workstation should not be running any. In order for - your workstation to be reasonably secure you should run as few - servers as possible, up to and including no servers at all, and - you should run a password-protected screen blanker. Of course, - given physical access to a workstation an attacker can break any - sort of security you put on it. This is definitely a problem that - you should consider, but you should also consider the fact that the - vast majority of break-ins occur remotely, over a network, from - people who do not have physical access to your workstation or - servers. - KerberosIV - - Using something like Kerberos also gives you the ability to - disable or change the password for a staff account in one place, - and have it immediately affect all the machines on which the staff - member may have an account. If a staff member's account gets - compromised, the ability to instantly change his password on all - machines should not be underrated. With discrete passwords, - changing a password on N machines can be a mess. You can also - impose re-passwording restrictions with Kerberos: not only can a - Kerberos ticket be made to timeout after a while, but the Kerberos - system can require that the user choose a new password after a - certain period of time (say, once a month). - - - - Securing Root-run Servers and SUID/SGID Binaries - - - ntalk - - - comsat - - - finger - - - sandboxes - - - sshd - - - telnetd - - - rshd - - - rlogind - - - The prudent sysadmin only runs the servers he needs to, no - more, no less. Be aware that third party servers are often the - most bug-prone. For example, running an old version of - imapd or - popper is like giving a universal - root ticket out to the entire world. - Never run a server that you have not checked out carefully. - Many servers do not need to be run as root. - For example, the ntalk, - comsat, and - finger daemons can be run in special - user sandboxes. 
A sandbox is not perfect, - unless you go through a large amount of trouble, but the onion - approach to security still stands: If someone is able to break - in through a server running in a sandbox, they still have to - break out of the sandbox. The more layers the attacker must - break through, the lower the likelihood of his success. Root - holes have historically been found in virtually every server - ever run as root, including basic system servers. - If you are running a machine through which people only login via - sshd and never login via - telnetd or - rshd or - rlogind, then turn off those - services! - - &os; now defaults to running - ntalkd, - comsat, and - finger in a sandbox. Another program - which may be a candidate for running in a sandbox is &man.named.8;. - /etc/defaults/rc.conf includes the arguments - necessary to run named in a sandbox in a - commented-out form. Depending on whether you are installing a new - system or upgrading an existing system, the special user accounts - used by these sandboxes may not be installed. The prudent - sysadmin would research and implement sandboxes for servers - whenever possible. - - sendmail - - - There are a number of other servers that typically do not run - in sandboxes: sendmail, - popper, - imapd, ftpd, - and others. There are alternatives to some of these, but - installing them may require more work than you are willing to - perform (the convenience factor strikes again). You may have to - run these servers as root and rely on other - mechanisms to detect break-ins that might occur through them. - - The other big potential root holes in a - system are the - suid-root and sgid binaries installed on the system. Most of - these binaries, such as rlogin, reside - in /bin, /sbin, - /usr/bin, or /usr/sbin. - While nothing is 100% safe, the system-default suid and sgid - binaries can be considered reasonably safe. Still, - root holes are occasionally found in these - binaries. A root hole was found in - Xlib in 1998 that made - xterm (which is typically suid) - vulnerable. It is better to be safe than sorry and the prudent - sysadmin will restrict suid binaries, that only staff should run, - to a special group that only staff can access, and get rid of - (chmod 000) any suid binaries that nobody uses. - A server with no display generally does not need an - xterm binary. Sgid binaries can be - almost as dangerous. If an intruder can break an sgid-kmem binary, - the intruder might be able to read /dev/kmem - and thus read the encrypted password file, potentially compromising - any passworded account. Alternatively an intruder who breaks - group kmem can monitor keystrokes sent through - ptys, including ptys used by users who login through secure - methods. An intruder that breaks the tty - group can write to - almost any user's tty. If a user is running a terminal program or - emulator with a keyboard-simulation feature, the intruder can - potentially generate a data stream that causes the user's terminal - to echo a command, which is then run as that user. - - - - Securing User Accounts - - User accounts are usually the most difficult to secure. While - you can impose Draconian access restrictions on your staff and - star out their passwords, you may not be able to - do so with any general user accounts you might have. If you do - have sufficient control, then you may win out and be able to secure - the user accounts properly. If not, you simply have to be more - vigilant in your monitoring of those accounts. 
Use of - ssh and Kerberos for user accounts is - more problematic, due to the extra administration and technical - support required, but still a very good solution compared to a - crypted password file. - - - - Securing the Password File - - The only sure fire way is to * out as many - passwords as you can and use ssh or - Kerberos for access to those accounts. Even though the encrypted - password file (/etc/spwd.db) can only be read - by root, it may be possible for an intruder - to obtain read access to that file even if the attacker cannot - obtain root-write access. - - Your security scripts should always check for and report - changes to the password file (see the Checking file integrity section - below). - - - - Securing the Kernel Core, Raw Devices, and - File systems - - If an attacker breaks root he can do - just about anything, but - there are certain conveniences. For example, most modern kernels - have a packet sniffing device driver built in. Under &os; it - is called the bpf device. An intruder - will commonly attempt to run a packet sniffer on a compromised - machine. You do not need to give the intruder the capability and - most systems do not have the need for the - bpf device compiled in. - - - sysctl - - But even if you turn off the bpf - device, you still have - /dev/mem and - /dev/kmem - to worry about. For that matter, the intruder can still write to - raw disk devices. Also, there is another kernel feature called - the module loader, &man.kldload.8;. An enterprising intruder can - use a KLD module to install his own bpf - device, or other sniffing - device, on a running kernel. To avoid these problems you have to - run the kernel at a higher secure level, at least securelevel 1. - The securelevel can be set with a sysctl on - the kern.securelevel variable. Once you have - set the securelevel to 1, write access to raw devices will be - denied and special chflags flags, - such as schg, - will be enforced. You must also ensure that the - schg flag is set on critical startup binaries, - directories, and script files — everything that gets run up - to the point where the securelevel is set. This might be overdoing - it, and upgrading the system is much more difficult when you - operate at a higher secure level. You may compromise and run the - system at a higher secure level but not set the - schg flag for every system file and directory - under the sun. Another possibility is to simply mount - / and /usr read-only. - It should be noted that being too Draconian in what you attempt to - protect may prevent the all-important detection of an - intrusion. - - - - Checking File Integrity: Binaries, Configuration Files, - Etc. - - When it comes right down to it, you can only protect your core - system configuration and control files so much before the - convenience factor rears its ugly head. For example, using - chflags to set the schg bit - on most of the files in / and - /usr is probably counterproductive, because - while it may protect the files, it also closes a detection window. - The last layer of your security onion is perhaps the most - important — detection. The rest of your security is pretty - much useless (or, worse, presents you with a false sense of - safety) if you cannot detect potential incursions. Half the job - of the onion is to slow down the attacker, rather than stop him, in - order to give the detection side of the equation a chance to catch - him in the act. - - The best way to detect an incursion is to look for modified, - missing, or unexpected files. 
The best way to look for modified - files is from another (often centralized) limited-access system. - Writing your security scripts on the extra-secure limited-access - system makes them mostly invisible to potential attackers, and this - is important. In order to take maximum advantage you generally - have to give the limited-access box significant access to the - other machines in the business, usually either by doing a - read-only NFS export of the other machines to the limited-access - box, or by setting up ssh key-pairs to - allow the limited-access box to ssh to - the other machines. Except for its network traffic, NFS is the - least visible method — allowing you to monitor the - file systems on each client box virtually undetected. If your - limited-access server is connected to the client boxes through a - switch, the NFS method is often the better choice. If your - limited-access server is connected to the client boxes through a - hub, or through several layers of routing, the NFS method may be - too insecure (network-wise) and using - ssh may be the better choice even with - the audit-trail tracks that ssh - lays. - - Once you give a limited-access box, at least read access to the - client systems it is supposed to monitor, you must write scripts - to do the actual monitoring. Given an NFS mount, you can write - scripts out of simple system utilities such as &man.find.1; and - &man.md5.1;. It is best to physically md5 the client-box files - at least once a day, and to test control files such as those - found in /etc and - /usr/local/etc even more often. When - mismatches are found, relative to the base md5 information the - limited-access machine knows is valid, it should scream at a - sysadmin to go check it out. A good security script will also - check for inappropriate suid binaries and for new or deleted files - on system partitions such as / and - /usr. - - When using ssh rather than NFS, - writing the security script is much more difficult. You - essentially have to scp the scripts to the client - box in order to - run them, making them visible, and for safety you also need to - scp the binaries (such as find) that those - scripts use. The ssh client on the - client box may already be compromised. All in all, using - ssh may be necessary when running over - insecure links, but it is also a lot harder to deal with. - - A good security script will also check for changes to user and - staff members access configuration files: - .rhosts, .shosts, - .ssh/authorized_keys and so forth… - files that might fall outside the purview of the - MD5 check. - - If you have a huge amount of user disk space, it may take too - long to run through every file on those partitions. In this case, - setting mount flags to disallow suid binaries and devices on those - partitions is a good idea. The nodev and - nosuid options (see &man.mount.8;) are what you - want to look into. You should probably scan them anyway, at least - once a week, since the object of this layer is to detect a break-in - whether or not the break-in is effective. - - Process accounting (see &man.accton.8;) is a relatively - low-overhead feature of the operating system which might help - as a post-break-in evaluation mechanism. It is especially - useful in tracking down how an intruder has actually broken into - a system, assuming the file is still intact after the break-in - occurs. 
- - Finally, security scripts should process the log files, and the - logs themselves should be generated in as secure a manner as - possible — remote syslog can be very useful. An intruder - tries to cover his tracks, and log files are critical to the - sysadmin trying to track down the time and method of the initial - break-in. One way to keep a permanent record of the log files is - to run the system console to a serial port and collect the - information on a continuing basis through a secure machine - monitoring the consoles. - - - - Paranoia - - A little paranoia never hurts. As a rule, a sysadmin can add - any number of security features, as long as they do not affect - convenience, and can add security features that - do affect convenience with some added thought. - Even more importantly, a security administrator should mix it up a - bit — if you use recommendations such as those given by this - document verbatim, you give away your methodologies to the - prospective attacker who also has access to this document. - - - - DoS(Denial of Service)服務阻斷攻擊 - Denial of Service (DoS) - - 這一節將介紹服務阻斷攻擊。 DoS 攻擊通常是以封包的方式進行攻擊, - 儘管幾乎沒有任何辦法來阻止大量的偽造封包耗盡網路資源, - 但通常可以透過一些方式來降低這類攻擊的損害,使它們無法擊垮伺服器。 - - - - Limiting server forks. - - - - Limiting springboard attacks (ICMP response 攻擊,ping - broadcast等等) - - - - Kernel Route Cache. - - - - A common DoS attack is against a forking server that attempts - to cause the server to eat processes, file descriptors, and memory, - until the machine dies. inetd - (see &man.inetd.8;) has several - options to limit this sort of attack. It should be noted that - while it is possible to prevent a machine from going down, it is - not generally possible to prevent a service from being disrupted - by the attack. Read the inetd manual - page carefully and pay - specific attention to the , , - and options. Note that spoofed-IP attacks - will circumvent the option to - inetd, so - typically a combination of options must be used. Some standalone - servers have self-fork-limitation parameters. - - Sendmail has its - option, which tends to work - much better than trying to use sendmail's load limiting options - due to the load lag. You should specify a - MaxDaemonChildren parameter, when you start - sendmail, high enough to handle your - expected load, but not so high that the computer cannot handle that - number of sendmails without falling on - its face. It is also prudent to run sendmail in queued mode - () and to run the daemon - (sendmail -bd) separate from the queue-runs - (sendmail -q15m). If you still want real-time - delivery you can run the queue at a much lower interval, such as - , but be sure to specify a reasonable - MaxDaemonChildren option for - that sendmail to prevent cascade failures. - - Syslogd can be attacked directly - and it is strongly recommended that you use the - option whenever possible, and the option - otherwise. - - You should also be fairly careful with connect-back services - such as TCP Wrapper's reverse-identd, - which can be attacked directly. You generally do not want to use - the reverse-ident feature of - TCP Wrapper for this reason. - - It is a very good idea to protect internal services from - external access by firewalling them off at your border routers. - The idea here is to prevent saturation attacks from outside your - LAN, not so much to protect internal services from network-based - root compromise. - Always configure an exclusive firewall, i.e., - firewall everything except ports A, B, - C, D, and M-Z. 
This way you can firewall off all of your - low ports except for certain specific services such as - named (if you are primary for a zone), - ntalkd, - sendmail, and other Internet-accessible - services. If you try to configure the firewall the other way - — as an inclusive or permissive firewall, there is a good - chance that you will forget to close a couple of - services, or that you will add a new internal service and forget - to update the firewall. You can still open up the high-numbered - port range on the firewall, to allow permissive-like operation, - without compromising your low ports. Also take note that &os; - allows you to control the range of port numbers used for dynamic - binding, via the various net.inet.ip.portrange - sysctl's (sysctl -a | fgrep - portrange), which can also ease the complexity of your - firewall's configuration. For example, you might use a normal - first/last range of 4000 to 5000, and a hiport range of 49152 to - 65535, then block off everything under 4000 in your firewall - (except for certain specific Internet-accessible ports, of - course). - - Another common DoS attack is called a springboard attack - — to attack a server in a manner that causes the server to - generate responses which overloads the server, the local - network, or some other machine. The most common attack of this - nature is the ICMP ping broadcast attack. - The attacker spoofs ping packets sent to your LAN's broadcast - address with the source IP address set to the actual machine they - wish to attack. If your border routers are not configured to - stomp on ping's to broadcast addresses, your LAN winds up - generating sufficient responses to the spoofed source address to - saturate the victim, especially when the attacker uses the same - trick on several dozen broadcast addresses over several dozen - different networks at once. Broadcast attacks of over a hundred - and twenty megabits have been measured. A second common - springboard attack is against the ICMP error reporting system. - By constructing packets that generate ICMP error responses, an - attacker can saturate a server's incoming network and cause the - server to saturate its outgoing network with ICMP responses. This - type of attack can also crash the server by running it out of - mbuf's, especially if the server cannot drain the ICMP responses - it generates fast enough. &os; 4.X kernels have a kernel - compile option called - which limits the effectiveness - of these sorts of attacks. - Later kernels use the sysctl - variable net.inet.icmp.icmplim. - The last major class of springboard - attacks is related to certain internal - inetd services such as the - udp echo service. An attacker simply spoofs a UDP packet with the - source address being server A's echo port, and the destination - address being server B's echo port, where server A and B are both - on your LAN. The two servers then bounce this one packet back and - forth between each other. The attacker can overload both servers - and their LANs simply by injecting a few packets in this manner. - Similar problems exist with the internal - chargen port. A - competent sysadmin will turn off all of these inetd-internal test - services. - - Spoofed packet attacks may also be used to overload the kernel - route cache. Refer to the net.inet.ip.rtexpire, - rtminexpire, and rtmaxcache - sysctl parameters. 
A spoofed packet attack - that uses a random source IP will cause the kernel to generate a - temporary cached route in the route table, viewable with - netstat -rna | fgrep W3. These routes - typically timeout in 1600 seconds or so. If the kernel detects - that the cached route table has gotten too big it will dynamically - reduce the rtexpire but will never decrease it - to less than rtminexpire. There are two - problems: - - - - The kernel does not react quickly enough when a lightly - loaded server is suddenly attacked. - - - - The rtminexpire is not low enough for - the kernel to survive a sustained attack. - - - - If your servers are connected to the Internet via a T3 or - better, it may be prudent to manually override both - rtexpire and rtminexpire - via &man.sysctl.8;. Never set either parameter to zero (unless - you want to crash the machine). Setting both - parameters to 2 seconds should be sufficient to protect the route - table from attack. - - - - Access Issues with Kerberos and SSH - ssh - KerberosIV - - There are a few issues with both Kerberos and - ssh that need to be addressed if - you intend to use them. Kerberos V is an excellent - authentication protocol, but there are bugs in the kerberized - telnet and - rlogin applications that make them - unsuitable for dealing with binary streams. Also, by default - Kerberos does not encrypt a session unless you use the - option. ssh - encrypts everything by default. - - ssh works quite well in every - respect except that it forwards encryption keys by default. What - this means is that if you have a secure workstation holding keys - that give you access to the rest of the system, and you - ssh to an insecure machine, your keys - are usable. The actual keys themselves are not exposed, but - ssh installs a forwarding port for the - duration of your login, and if an attacker has broken - root on the - insecure machine he can utilize that port to use your keys to gain - access to any other machine that your keys unlock. - - We recommend that you use ssh in - combination with Kerberos whenever possible for staff logins. - ssh can be compiled with Kerberos - support. This reduces your reliance on potentially exposed - ssh keys while at the same time - protecting passwords via Kerberos. ssh - keys should only be used for automated tasks from secure machines - (something that Kerberos is unsuited to do). We also recommend that - you either turn off key-forwarding in the - ssh configuration, or that you make use - of the from=IP/DOMAIN option that - ssh allows in its - authorized_keys file to make the key only - usable to entities logging in from specific machines. - - - - - DES, MD5, and Crypt - - BillSwingleParts rewritten and updated by - - - - - - - security - crypt - - - crypt - DES - MD5 - - Every user on a &unix; system has a password associated with - their account. It seems obvious that these passwords need to be - known only to the user and the actual operating system. In - order to keep these passwords secret, they are encrypted with - what is known as a one-way hash, that is, they can - only be easily encrypted but not decrypted. In other words, what - we told you a moment ago was obvious is not even true: the - operating system itself does not really know - the password. It only knows the encrypted - form of the password. The only way to get the - plain-text password is by a brute force search of the - space of possible passwords. 
- - Unfortunately the only secure way to encrypt passwords when - &unix; came into being was based on DES, the Data Encryption - Standard. This was not such a problem for users resident in - the US, but since the source code for DES could not be exported - outside the US, &os; had to find a way to both comply with - US law and retain compatibility with all the other &unix; - variants that still used DES. - - The solution was to divide up the encryption libraries - so that US users could install the DES libraries and use - DES but international users still had an encryption method - that could be exported abroad. This is how &os; came to - use MD5 as its default encryption method. MD5 is believed to - be more secure than DES, so installing DES is offered primarily - for compatibility reasons. - - - Recognizing Your Crypt Mechanism + - Before &os; 4.4 libcrypt.a was a - symbolic link pointing to the library which was used for - encryption. &os; 4.4 changed libcrypt.a to - provide a configurable password authentication hash library. - Currently the library supports DES, MD5 and Blowfish hash - functions. By default &os; uses MD5 to encrypt - passwords. - - It is pretty easy to identify which encryption method - &os; is set up to use. Examining the encrypted passwords in - the /etc/master.passwd file is one way. - Passwords encrypted with the MD5 hash are longer than those - encrypted with the DES hash and also begin with the characters - $1$. Passwords starting with - $2a$ are encrypted with the - Blowfish hash function. DES password strings do not - have any particular identifying characteristics, but they are - shorter than MD5 passwords, and are coded in a 64-character - alphabet which does not include the $ - character, so a relatively short string which does not begin with - a dollar sign is very likely a DES password. - - The password format used for new passwords is controlled - by the passwd_format login capability in - /etc/login.conf, which takes values of - des, md5 or - blf. See the &man.login.conf.5; manual page - for more information about login capabilities. + By default, the &os; kernel boots with a security level of + -1. This is called insecure + mode because immutable file flags may be turned off + and all devices may be read from or written to. The security + level will remain at -1 unless it is + altered through sysctl or by a setting in + the startup scripts. The security level may be increased + during system startup by setting + kern_securelevel_enable to + YES in /etc/rc.conf, + and the value of kern_securelevel to the + desired security level. See &man.security.7; and &man.init.8; + for more information on these settings and the available + security levels. + + + Increasing the securelevel can break + Xorg and cause other issues. Be + prepared to do some debugging. + + + The net.inet.tcp.blackhole and + net.inet.udp.blackhole settings can be used + to drop incoming SYN packets on closed + ports without sending a return RST + response. The default behavior is to return an + RST to show a port is closed. Changing the + default provides some level of protection against ports scans, + which are used to determine which applications are running on + a system. Set net.inet.tcp.blackhole to + 2 and + net.inet.udp.blackhole to + 1. Refer to &man.blackhole.4; for more + information about these settings. + + The net.inet.icmp.drop_redirect and + net.inet.ip.redirect settings help prevent + against redirect attacks. 
A redirect + attack is a type of DoS which sends mass + numbers of ICMP type 5 packets. Since + these packets are not required, set + net.inet.icmp.drop_redirect to + 1 and set + net.inet.ip.redirect to + 0. + + Source routing is a method for detecting and accessing + non-routable addresses on the internal network. This should + be disabled as non-routable addresses are normally not + routable on purpose. To disable this feature, set + net.inet.ip.sourceroute and + net.inet.ip.accept_sourceroute to + 0. + + When a machine on the network needs to send messages to + all hosts on a subnet, an ICMP echo request + message is sent to the broadcast address. However, there is + no reason for an external host to perform such an action. To + reject all external broadcast requests, set + net.inet.icmp.bmcastecho to + 0. + Some additional settings are documented in + &man.security.7;. One-time Passwords + one-time passwords security one-time passwords - S/Key is a one-time password scheme based on a one-way hash - function. &os; uses the MD4 hash for compatibility but other - systems have used MD5 and DES-MAC. S/Key has been part of the - &os; base system since version 1.1.5 and is also used on a - growing number of other operating systems. S/Key is a registered - trademark of Bell Communications Research, Inc. - - From version 5.0 of &os;, S/Key has been replaced with - the functionally equivalent OPIE (One-time Passwords In - Everything). OPIE uses the MD5 hash by default. - - There are three different sorts of passwords which we will discuss - below. The first is your usual &unix; style or - Kerberos password; we will call this a &unix; password. - The second sort is the one-time password which is generated by the - S/Key key program or the OPIE - &man.opiekey.1; program and accepted by the - keyinit or &man.opiepasswd.1; programs - and the login prompt; we will - call this a one-time password. The final sort of - password is the secret password which you give to the - key/opiekey programs (and - sometimes the - keyinit/opiepasswd programs) - which it uses to generate - one-time passwords; we will call it a secret password - or just unqualified password. - - The secret password does not have anything to do with your &unix; - password; they can be the same but this is not recommended. S/Key - and OPIE secret passwords are not limited to 8 characters like old - &unix; passwordsUnder &os; the standard login - password may be up to 128 characters in length., - they can be as long as you like. Passwords of six or - seven word long phrases are fairly common. For the most part, the - S/Key or OPIE system operates completely independently of the &unix; - password system. - - Besides the password, there are two other pieces of data that - are important to S/Key and OPIE. One is what is known as the - seed or key, consisting of two letters - and five digits. The other is what is called the iteration - count, a number between 1 and 100. S/Key creates the - one-time password by concatenating the seed and the secret password, - then applying the MD4/MD5 hash as many times as specified by the - iteration count and turning the result into six short English words. - These six English words are your one-time password. The - authentication system (primarily PAM) keeps - track of the last one-time password used, and the user is - authenticated if the hash of the user-provided password is equal to - the previous password. 
Because a one-way hash is used it is - impossible to generate future one-time passwords if a successfully - used password is captured; the iteration count is decremented after - each successful login to keep the user and the login program in - sync. When the iteration count gets down to 1, S/Key and OPIE must be - reinitialized. - - There are three programs involved in each system - which we will discuss below. The key and - opiekey programs accept an iteration - count, a seed, and a secret password, and generate a one-time - password or a consecutive list of one-time passwords. The - keyinit and opiepasswd - programs are used to initialize S/Key and OPIE respectively, - and to change passwords, iteration counts, or seeds; they - take either a secret passphrase, or an iteration count, - seed, and one-time password. The keyinfo - and opieinfo programs examine the - relevant credentials files (/etc/skeykeys or - /etc/opiekeys) and print out the invoking user's - current iteration count and seed. - - There are four different sorts of operations we will cover. The - first is using keyinit or - opiepasswd over a secure connection to set up - one-time-passwords for the first time, or to change your password - or seed. The second operation is using keyinit - or opiepasswd over an insecure connection, in - conjunction with key or opiekey - over a secure connection, to do the same. The third is using - key/opiekey to log in over - an insecure connection. The fourth is using key - or opiekey to generate a number of keys which - can be written down or printed out to carry with you when going to - some location without secure connections to anywhere. + By default, &os; includes support for One-time Passwords In + Everything (OPIE). OPIE + is designed to prevent replay attacks, in which an attacker + discovers a user's password and uses it to access a system. + Since a password is only used once in OPIE, a + discovered password is of little use to an attacker. + OPIE uses a secure hash and a + challenge/response system to manage passwords. The &os; + implementation uses the MD5 hash by + default. + + OPIE uses three different types of + passwords. The first is the usual &unix; or Kerberos password. + The second is the one-time password which is generated by + opiekey. The third type of password is the + secret password which is used to generate + one-time passwords. The secret password has nothing to do with, + and should be different from, the &unix; password. + + There are two other pieces of data that are important to + OPIE. One is the seed or + key, consisting of two letters and five digits. + The other is the iteration count, a number + between 1 and 100. OPIE creates the one-time + password by concatenating the seed and the secret password, + applying the MD5 hash as many times as + specified by the iteration count, and turning the result into + six short English words which represent the one-time password. + The authentication system keeps track of the last one-time + password used, and the user is authenticated if the hash of the + user-provided password is equal to the previous password. + Because a one-way hash is used, it is impossible to generate + future one-time passwords if a successfully used password is + captured. The iteration count is decremented after each + successful login to keep the user and the login program in sync. + When the iteration count gets down to 1, + OPIE must be reinitialized. + + There are a few programs involved in this process. 
A + one-time password, or a consecutive list of one-time passwords, + is generated by passing an iteration count, a seed, and a secret + password to &man.opiekey.1;. In addition to initializing + OPIE, &man.opiepasswd.1; is used to change + passwords, iteration counts, or seeds. The relevant credential + files in /etc/opiekeys are examined by + &man.opieinfo.1; which prints out the invoking user's current + iteration count and seed. + + This section describes four different sorts of operations. + The first is how to set up one-time passwords for the first time + over a secure connection. The second is how to use + opiepasswd over an insecure connection. The + third is how to log in over an insecure connection. The fourth + is how to generate a number of keys which can be written down or + printed out to use at insecure locations. + - Secure Connection Initialization - - To initialize S/Key for the first time, change your password, - or change your seed while logged in over a secure connection - (e.g. on the console of a machine or via ssh), use the - keyinit command without any parameters while - logged in as yourself: - - &prompt.user; keyinit -Adding unfurl: -Reminder - Only use this method if you are directly connected. -If you are using telnet or rlogin exit with no password and use keyinit -s. -Enter secret password: -Again secret password: - -ID unfurl s/key is 99 to17757 -DEFY CLUB PRO NASH LACE SOFT + Initializing <acronym>OPIE</acronym> - For OPIE, opiepasswd is used instead: + To initialize OPIE for the first time, + run this command from a secure location: &prompt.user; opiepasswd -c [grimreaper] ~ $ opiepasswd -f -c @@ -1163,111 +729,82 @@ Using MD5 to compute responses. Enter new secret pass phrase: Again new secret pass phrase: + ID unfurl OTP key is 499 to4268 -MOS MALL GOAT ARM AVID COED - +MOS MALL GOAT ARM AVID COED - At the Enter new secret pass phrase: or - Enter secret password: prompts, you - should enter a password or phrase. Remember, this is not the - password that you will use to login with, this is used to generate - your one-time login keys. The ID line gives the - parameters of your particular instance: your login name, the - iteration count, and seed. When logging in the system - will remember these parameters and present them back to you so you - do not have to remember them. The last line gives the particular - one-time password which corresponds to those parameters and your - secret password; if you were to re-login immediately, this - one-time password is the one you would use. + The -c sets console mode which assumes + that the command is being run from a secure location, such as + a computer under the user's control or an + SSH session to a computer under the user's + control. + + When prompted, enter the secret password which will be + used to generate the one-time login keys. This password + should be difficult to guess and should be different than the + password which is associated with the user's login account. + It must be between 10 and 127 characters long. Remember this + password. + + The ID line lists the login name + (unfurl), default iteration count + (499), and default seed + (to4268). When logging in, the system will + remember these parameters and display them, meaning that they + do not have to be memorized. The last line lists the + generated one-time password which corresponds to those + parameters and the secret password. At the next login, use + this one-time password.
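+
+      The iteration count and seed that will be used for the next
+      login can be reviewed at any time with &man.opieinfo.1;.  The
+      values shown here are only an example and will differ from
+      system to system:
+
+      &prompt.user; opieinfo
+498 to4268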
Insecure Connection Initialization - To initialize or change your secret password over an - insecure connection, you will need to already have a secure - connection to some place where you can run key - or opiekey; this might be in the form of a - desk accessory on a &macintosh;, or a shell prompt on a machine you - trust. You will also need to make up an iteration count (100 is - probably a good value), and you may make up your own seed or use a - randomly-generated one. Over on the insecure connection (to the - machine you are initializing), use the keyinit - -s command: - - &prompt.user; keyinit -s -Updating unfurl: -Old key: to17758 -Reminder you need the 6 English words from the key command. -Enter sequence count from 1 to 9999: 100 -Enter new key [default to17759]: -s/key 100 to 17759 -s/key access password: -s/key access password:CURE MIKE BANE HIM RACY GORE - - - For OPIE, you need to use opiepasswd: + To initialize or change the secret password on an + insecure system, a secure connection is needed to some place + where opiekey can be run. This might be a + shell prompt on a trusted machine. An iteration count is + needed, where 100 is probably a good value, and the seed can + either be specified or the randomly-generated one used. On + the insecure connection, the machine being initialized, use + &man.opiepasswd.1;: &prompt.user; opiepasswd Updating unfurl: You need the response from an OTP generator. Old secret pass phrase: - otp-md5 498 to4268 ext - Response: GAME GAG WELT OUT DOWN CHAT + otp-md5 498 to4268 ext + Response: GAME GAG WELT OUT DOWN CHAT New secret pass phrase: - otp-md5 499 to4269 - Response: LINE PAP MILK NELL BUOY TROY + otp-md5 499 to4269 + Response: LINE PAP MILK NELL BUOY TROY ID mark OTP key is 499 gr4269 -LINE PAP MILK NELL BUOY TROY - +LINE PAP MILK NELL BUOY TROY - To accept the default seed (which the - keyinit program confusingly calls a - key), press Return. - Then before entering an - access password, move over to your secure connection or S/Key desk - accessory, and give it the same parameters: - - &prompt.user; key 100 to17759 -Reminder - Do not use this program while logged in via telnet or rlogin. -Enter secret password: <secret password> -CURE MIKE BANE HIM RACY GORE - - Or for OPIE: + To accept the default seed, press Return. + Before entering an access password, move over to the secure + connection and give it the same parameters: &prompt.user; opiekey 498 to4268 Using the MD5 algorithm to compute response. -Reminder: Don't use opiekey from telnet or dial-in sessions. +Reminder: Do not use opiekey from telnet or dial-in sessions. Enter secret pass phrase: -GAME GAG WELT OUT DOWN CHAT - +GAME GAG WELT OUT DOWN CHAT - Now switch back over to the insecure connection, and copy the - one-time password generated over to the relevant program. + Switch back over to the insecure connection, and copy the + generated one-time password over to the relevant + program. Generating a Single One-time Password - Once you have initialized S/Key or OPIE, when you login you will be - presented with a prompt like this: - -&prompt.user; telnet example.com -Trying 10.0.0.1... -Connected to example.com -Escape character is '^]'. - -FreeBSD/i386 (example.com) (ttypa) - -login: <username> -s/key 97 fw13894 -Password: - - Or for OPIE: + After initializing OPIE and logging in, + a prompt like this will be displayed: -&prompt.user; telnet example.com + &prompt.user; telnet example.com Trying 10.0.0.1... Connected to example.com Escape character is '^]'. 
@@ -1278,77 +815,47 @@ otp-md5 498 gr4269 ext Password: - As a side note, the S/Key and OPIE prompts have a useful feature - (not shown here): if you press Return - at the password prompt, the - prompter will turn echo on, so you can see what you are - typing. This can be extremely useful if you are attempting to - type in a password by hand, such as from a printout. + The OPIE prompts provides a useful + feature. If Return is pressed at the + password prompt, the prompt will turn echo on and display + what is typed. This can be useful when attempting to type in + a password by hand from a printout. MS-DOS Windows MacOS - At this point you need to generate your one-time password to - answer this login prompt. This must be done on a trusted system - that you can run key or - opiekey on. (There are versions of these for DOS, - &windows; and &macos; as well.) They need both the iteration count and - the seed as command line options. You can cut-and-paste these - right from the login prompt on the machine that you are logging - in to. + At this point, generate the one-time password to answer + this login prompt. This must be done on a trusted system + where it is safe to run &man.opiekey.1;. There are versions + of this command for &windows;, &macos; and &os;. This command + needs the iteration count and the seed as command line + options. Use cut-and-paste from the login prompt on the + machine being logged in to. On the trusted system: - &prompt.user; key 97 fw13894 -Reminder - Do not use this program while logged in via telnet or rlogin. -Enter secret password: -WELD LIP ACTS ENDS ME HAAG - - For OPIE: - &prompt.user; opiekey 498 to4268 Using the MD5 algorithm to compute response. -Reminder: Don't use opiekey from telnet or dial-in sessions. +Reminder: Do not use opiekey from telnet or dial-in sessions. Enter secret pass phrase: GAME GAG WELT OUT DOWN CHAT - Now that you have your one-time password you can continue - logging in: - - login: <username> -s/key 97 fw13894 -Password: <return to enable echo> -s/key 97 fw13894 -Password [echo on]: WELD LIP ACTS ENDS ME HAAG -Last login: Tue Mar 21 11:56:41 from 10.0.0.2 ... - + Once the one-time password is generated, continue to log + in. Generating Multiple One-time Passwords - Sometimes you have to go places where you do not have - access to a trusted machine or secure connection. In this case, - it is possible to use the key and - opiekey commands to - generate a number of one-time passwords beforehand to be printed - out and taken with you. For example: - - &prompt.user; key -n 5 30 zz99999 -Reminder - Do not use this program while logged in via telnet or rlogin. -Enter secret password: <secret password> -26: SODA RUDE LEA LIND BUDD SILT -27: JILT SPY DUTY GLOW COWL ROT -28: THEM OW COLA RUNT BONG SCOT -29: COT MASH BARR BRIM NAN FLAG -30: CAN KNEE CAST NAME FOLK BILK - - Or for OPIE: + Sometimes there is no access to a trusted machine or + secure connection. In this case, it is possible to use + &man.opiekey.1; to generate a number of one-time passwords + beforehand. For example: &prompt.user; opiekey -n 5 30 zz99999 Using the MD5 algorithm to compute response. -Reminder: Don't use opiekey from telnet or dial-in sessions. +Reminder: Do not use opiekey from telnet or dial-in sessions. 
Enter secret pass phrase: <secret password> 26: JOAN BORE FOSS DES NAY QUIT 27: LATE BIAS SLAY FOLK MUCH TRIG @@ -1356,70 +863,27 @@ 29: RIO ODIN GO BYE FURY TIC 30: GREW JIVE SAN GIRD BOIL PHI - The requests five keys in sequence, the - specifies what the last iteration number - should be. Note that these are printed out in - reverse order of eventual use. If you are - really paranoid, you might want to write the results down by hand; - otherwise you can cut-and-paste into lpr. Note - that each line shows both the iteration count and the one-time - password; you may still find it handy to scratch off passwords as - you use them. + The -n 5 requests five keys in sequence, + and 30 specifies what the last iteration + number should be. Note that these are printed out in + reverse order of use. The really + paranoid might want to write the results down by hand; + otherwise, print the list. Each line shows both the iteration + count and the one-time password. Scratch off the passwords as + they are used. Restricting Use of &unix; Passwords - S/Key can place restrictions on the use of &unix; passwords based - on the host name, user name, terminal port, or IP address of a - login session. These restrictions can be found in the - configuration file /etc/skey.access. The - &man.skey.access.5; manual page has more information on the complete - format of the file and also details some security cautions to be - aware of before depending on this file for security. - - If there is no /etc/skey.access file - (this is the default on &os; 4.X systems), then all users will - be allowed to use &unix; passwords. If the file exists, however, - then all users will be required to use S/Key unless explicitly - permitted to do otherwise by configuration statements in the - skey.access file. In all cases, &unix; - passwords are permitted on the console. - - Here is a sample skey.access configuration - file which illustrates the three most common sorts of configuration - statements: - - permit internet 192.168.0.0 255.255.0.0 -permit user fnord -permit port ttyd0 - - The first line (permit internet) allows - users whose IP source address (which is vulnerable to spoofing) - matches the specified value and mask, to use &unix; passwords. This - should not be considered a security mechanism, but rather, a means - to remind authorized users that they are using an insecure network - and need to use S/Key for authentication. - - The second line (permit user) allows the - specified username, in this case fnord, to use - &unix; passwords at any time. Generally speaking, this should only - be used for people who are either unable to use the - key program, like those with dumb terminals, or - those who are ineducable. - - The third line (permit port) allows all - users logging in on the specified terminal line to use &unix; - passwords; this would be used for dial-ups. - - OPIE can restrict the use of &unix; passwords based on the IP - address of a login session just like S/Key does. The relevant file - is /etc/opieaccess, which is present by default - on &os; 5.0 and newer systems. Please check &man.opieaccess.5; - for more information on this file and which security considerations - you should be aware of when using it. + OPIE can restrict the use of &unix; + passwords based on the IP address of a login session. The + relevant file is /etc/opieaccess, which + is present by default. Refer to &man.opieaccess.5; for more + information on this file and which security considerations to + be aware of when using it.
- Here is a sample opieaccess file: + Here is a sample opieaccess: permit 192.168.0.0 255.255.0.0 @@ -1427,21 +891,22 @@ vulnerable to spoofing) matches the specified value and mask, to use &unix; passwords at any time. - If no rules in opieaccess are matched, - the default is to deny non-OPIE logins. - + If no rules in opieaccess are + matched, the default is to deny non-OPIE + logins. - TCP Wrappers + + TCP Wrapper + - TomRhodesWritten by: + TomRhodesWritten + by - - TCP Wrappers 每個熟 &man.inetd.8; 的人幾乎都會聽過 TCP @@ -1642,1318 +1107,764 @@ - - <application>KerberosIV</application> + + + <application>Kerberos</application> + - MarkMurrayContributed by + + + Tillman + Hodgson + + Contributed by + + - MarkDapozBased on a contribution by + + + Mark + Murray + + Based on a contribution by + - - - Kerberos is a network add-on system/protocol that allows users to - authenticate themselves through the services of a secure server. - Services such as remote login, remote copy, secure inter-system file - copying and other high-risk tasks are made considerably safer and more - controllable. - - The following instructions can be used as a guide on how to set up - Kerberos as distributed for &os;. However, you should refer to the - relevant manual pages for a complete description. - - - Installing <application>KerberosIV</application> - - MIT - - KerberosIV - installing - - Kerberos is an optional component of &os;. The easiest - way to install this software is by selecting the krb4 or - krb5 distribution in sysinstall - during the initial installation of &os;. This will install - the eBones (KerberosIV) or Heimdal (Kerberos5) - implementation of Kerberos. These implementations are - included because they are developed outside the USA/Canada and - were thus available to system owners outside those countries - during the era of restrictive export controls on cryptographic - code from the USA. - - Alternatively, the MIT implementation of Kerberos is - available from the Ports Collection as - security/krb5. - - - - Creating the Initial Database - - This is done on the Kerberos server only. First make sure that - you do not have any old Kerberos databases around. You should change - to the directory /etc/kerberosIV and check that - only the following files are present: - - &prompt.root; cd /etc/kerberosIV -&prompt.root; ls -README krb.conf krb.realms - - If any additional files (such as principal.* - or master_key) exist, then use the - kdb_destroy command to destroy the old Kerberos - database, or if Kerberos is not running, simply delete the extra - files. - - You should now edit the krb.conf and - krb.realms files to define your Kerberos realm. - In this case the realm will be EXAMPLE.COM and the - server is grunt.example.com. We edit - or create the krb.conf file: - - &prompt.root; cat krb.conf -EXAMPLE.COM -EXAMPLE.COM grunt.example.com admin server -CS.BERKELEY.EDU okeeffe.berkeley.edu -ATHENA.MIT.EDU kerberos.mit.edu -ATHENA.MIT.EDU kerberos-1.mit.edu -ATHENA.MIT.EDU kerberos-2.mit.edu -ATHENA.MIT.EDU kerberos-3.mit.edu -LCS.MIT.EDU kerberos.lcs.mit.edu -TELECOM.MIT.EDU bitsy.mit.edu -ARC.NASA.GOV trident.arc.nasa.gov - - In this case, the other realms do not need to be there. They are - here as an example of how a machine may be made aware of multiple - realms. You may wish to not include them for simplicity. - - The first line names the realm in which this system works. The - other lines contain realm/host entries. 
The first item on a line is a - realm, and the second is a host in that realm that is acting as a - key distribution center. The words admin - server following a host's name means that host also - provides an administrative database server. For further explanation - of these terms, please consult the Kerberos manual pages. - - Now we have to add grunt.example.com - to the EXAMPLE.COM realm and also add an entry to - put all hosts in the .example.com - domain in the EXAMPLE.COM realm. The - krb.realms file would be updated as - follows: - - &prompt.root; cat krb.realms -grunt.example.com EXAMPLE.COM -.example.com EXAMPLE.COM -.berkeley.edu CS.BERKELEY.EDU -.MIT.EDU ATHENA.MIT.EDU -.mit.edu ATHENA.MIT.EDU - - Again, the other realms do not need to be there. They are here as - an example of how a machine may be made aware of multiple realms. You - may wish to remove them to simplify things. - - The first line puts the specific system into - the named realm. The rest of the lines show how to default systems of - a particular subdomain to a named realm. - - Now we are ready to create the database. This only needs to run - on the Kerberos server (or Key Distribution Center). Issue the - kdb_init command to do this: - - &prompt.root; kdb_init -Realm name [default ATHENA.MIT.EDU ]: EXAMPLE.COM -You will be prompted for the database Master Password. -It is important that you NOT FORGET this password. - -Enter Kerberos master key: - - Now we have to save the key so that servers on the local machine - can pick it up. Use the kstash command to do - this: - - &prompt.root; kstash + Kerberos is a network + authentication protocol which was originally created by the + Massachusetts Institute of Technology (MIT) + as a way to securely provide authentication across a potentially + hostile network. The Kerberos + protocol uses strong cryptography so that both a client and + server can prove their identity without sending any unencrypted + secrets over the network. Kerberos + can be described as an identity-verifying proxy system and as a + trusted third-party authentication system. After a user + authenticates with Kerberos, their + communications can be encrypted to assure privacy and data + integrity. + + The only function of Kerberos is + to provide the secure authentication of users and servers on the + network. It does not provide authorization or auditing + functions. It is recommended that + Kerberos be used with other security + methods which provide authorization and audit services. -Enter Kerberos master key: + The current version of the protocol is version 5, described + in RFC 4120. Several free + implementations of this protocol are available, covering a wide + range of operating systems. MIT continues to + develop their Kerberos package. It + is commonly used in the US as a cryptography + product, and has historically been subject to + US export regulations. In &os;, + MIT Kerberos is + available as the security/krb5 package or + port. The Heimdal Kerberos + implementation was explicitly developed outside of the + US to avoid export regulations. The Heimdal + Kerberos distribution is included in + the base &os; installation, and another distribution with more + configurable options is available as + security/heimdal in the Ports + Collection. + + In Kerberos users and services + are identified as principals which are contained + within an administrative grouping, called a + realm. A typical user principal would be of the + form + user@REALM + (realms are traditionally uppercase). 
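	For example, using the demonstration realm and host names
	that appear later in this section, a typical user principal
	and a host service principal might look like the
	following:

tillman@EXAMPLE.ORG
host/myserver.example.org@EXAMPLE.ORG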
+ + This section provides a guide on how to set up + Kerberos using the Heimdal + distribution included in &os;. + + For purposes of demonstrating a + Kerberos installation, the name + spaces will be as follows: -Current Kerberos master key version is 1. + + + The DNS domain (zone) will be + example.org. + -Master key entered. BEWARE! + + The Kerberos realm will be + EXAMPLE.ORG. + + - This saves the encrypted master password in - /etc/kerberosIV/master_key. - + + Use real domain names when setting up + Kerberos, even if it will run + internally. This avoids DNS problems and + assures inter-operation with other + Kerberos realms. + - Making It All Run + Setting up a Heimdal <acronym>KDC</acronym> - KerberosIV - initial startup + Kerberos5 + Key Distribution Center - Two principals need to be added to the database for - each system that will be secured with Kerberos. - Their names are kpasswd and rcmd. - These two principals are made for each system, with the instance being - the name of the individual system. - - These daemons, kpasswd and - rcmd allow other systems to change Kerberos - passwords and run commands like &man.rcp.1;, - &man.rlogin.1; and &man.rsh.1;. + The Key Distribution Center (KDC) is + the centralized authentication service that + Kerberos provides, the + trusted third party of the system. It is the + computer that issues Kerberos + tickets, which are used for clients to authenticate to + servers. Because the KDC is considered + trusted by all other computers in the + Kerberos realm, it has heightened + security concerns. Direct access to the KDC should be + limited. + + While running a KDC requires few + computing resources, a dedicated machine acting only as a + KDC is recommended for security + reasons. - Now let us add these entries: + To begin setting up a KDC, add these + lines to /etc/rc.conf: - &prompt.root; kdb_edit -Opening database... + kerberos5_server_enable="YES" +kadmind5_server_enable="YES" -Enter Kerberos master key: + Next, edit /etc/krb5.conf as + follows: -Current Kerberos master key version is 1. + [libdefaults] + default_realm = EXAMPLE.ORG +[realms] + EXAMPLE.ORG = { + kdc = kerberos.example.org + admin_server = kerberos.example.org + } +[domain_realm] + .example.org = EXAMPLE.ORG -Master key entered. BEWARE! -Previous or default values are in [brackets] , -enter return to leave the same, or new value. + In this example, the KDC will use the + fully-qualified hostname kerberos.example.org. The + hostname of the KDC must be resolvable in the + DNS. + + Kerberos can also use the + DNS to locate KDCs, instead of a + [realms] section in + /etc/krb5.conf. For large organizations + that have their own DNS servers, the above + example could be trimmed to: -Principal name: passwd -Instance: grunt + [libdefaults] + default_realm = EXAMPLE.ORG +[domain_realm] + .example.org = EXAMPLE.ORG -<Not found>, Create [y] ? y + With the following lines being included in the + example.org zone + file: + + _kerberos._udp IN SRV 01 00 88 kerberos.example.org. +_kerberos._tcp IN SRV 01 00 88 kerberos.example.org. +_kpasswd._udp IN SRV 01 00 464 kerberos.example.org. +_kerberos-adm._tcp IN SRV 01 00 749 kerberos.example.org. +_kerberos IN TXT EXAMPLE.ORG -Principal: passwd, Instance: grunt, kdc_key_ver: 1 -New Password: <---- enter RANDOM here -Verifying password + + In order for clients to be able to find the + Kerberos services, they + must have either + a fully configured /etc/krb5.conf or a + minimally configured /etc/krb5.conf + and a properly configured + DNS server. 
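	When the DNS-based approach is used, the
	SRV records can optionally be checked from
	a client before continuing, for example with drill(1) from
	the base system (shown here only as a sanity check, output
	omitted):

&prompt.user; drill _kerberos._udp.example.org SRV
&prompt.user; drill _kerberos-adm._tcp.example.org SRV

	Each query should return the corresponding record pointing
	at kerberos.example.org.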
+ -New Password: <---- enter RANDOM here + Next, create the Kerberos + database which contains the keys of all principals (users and + hosts) encrypted with a master password. It is not required + to remember this password as it will be stored in + /var/heimdal/m-key; it would be + reasonable to use a 45-character random password for this + purpose. To create the master key, run + kstash and enter a password: -Random password [y] ? y + &prompt.root; kstash +Master key: xxxxxxxxxxxxxxxxxxxxxxx +Verifying password - Master key: xxxxxxxxxxxxxxxxxxxxxxx -Principal's new key version = 1 -Expiration date (enter yyyy-mm-dd) [ 2000-01-01 ] ? -Max ticket lifetime (*5 minutes) [ 255 ] ? -Attributes [ 0 ] ? -Edit O.K. -Principal name: rcmd -Instance: grunt + Once the master key has been created, the database should + be initialized. The Kerberos + administrative tool &man.kadmin.8; can be used on the KDC in a + mode that operates directly on the database, without using the + &man.kadmind.8; network service, as + kadmin -l. This resolves the + chicken-and-egg problem of trying to connect to the database + before it is created. At the kadmin + prompt, use init to create the realm's + initial database: + + &prompt.root; kadmin -l +kadmin> init EXAMPLE.ORG +Realm max ticket life [unlimited]: + + Lastly, while still in kadmin, create + the first principal using add. Stick to + the default options for the principal for now, as these can be + changed later with modify. Type + ? at the prompt to see the available + options. -<Not found>, Create [y] ? + kadmin> add tillman +Max ticket life [unlimited]: +Max renewable life [unlimited]: +Attributes []: +Password: xxxxxxxx +Verifying password - Password: xxxxxxxx -Principal: rcmd, Instance: grunt, kdc_key_ver: 1 -New Password: <---- enter RANDOM here -Verifying password + Next, start the KDC services by running + service kerberos start and + service kadmind start. While there will + not be any kerberized daemons running at this point, it is + possible to confirm that the KDC is + functioning by obtaining a ticket for the + principal that was just created: -New Password: <---- enter RANDOM here + &prompt.user; kinit tillman +tillman@EXAMPLE.ORG's Password: -Random password [y] ? + Confirm that a ticket was successfully obtained using + klist: -Principal's new key version = 1 -Expiration date (enter yyyy-mm-dd) [ 2000-01-01 ] ? -Max ticket lifetime (*5 minutes) [ 255 ] ? -Attributes [ 0 ] ? -Edit O.K. -Principal name: <---- null entry here will cause an exit - + &prompt.user; klist +Credentials cache: FILE:/tmp/krb5cc_1001 + Principal: tillman@EXAMPLE.ORG - - Creating the Server File + Issued Expires Principal +Aug 27 15:37:58 2013 Aug 28 01:37:58 2013 krbtgt/EXAMPLE.ORG@EXAMPLE.ORG - We now have to extract all the instances which define the - services on each machine. For this we use the - ext_srvtab command. This will create a file - which must be copied or moved by secure - means to each Kerberos client's - /etc/kerberosIV directory. This file must - be present on each server and client, and is crucial to the - operation of Kerberos. - - - &prompt.root; ext_srvtab grunt -Enter Kerberos master key: - -Current Kerberos master key version is 1. - -Master key entered. BEWARE! -Generating 'grunt-new-srvtab'.... - - Now, this command only generates a temporary file which must be - renamed to srvtab so that all the servers can pick - it up. 
Use the &man.mv.1; command to move it into place on - the original system: - - &prompt.root; mv grunt-new-srvtab srvtab - - If the file is for a client system, and the network is not deemed - safe, then copy the - client-new-srvtab to - removable media and transport it by secure physical means. Be sure to - rename it to srvtab in the client's - /etc/kerberosIV directory, and make sure it is - mode 600: + The temporary ticket can be destroyed when the test is + finished: - &prompt.root; mv grumble-new-srvtab srvtab -&prompt.root; chmod 600 srvtab + &prompt.user; kdestroy - Populating the Database - - We now have to add some user entries into the database. First - let us create an entry for the user jane. Use the - kdb_edit command to do this: + Configuring a Server to Use + <application>Kerberos</application> - &prompt.root; kdb_edit -Opening database... - -Enter Kerberos master key: - -Current Kerberos master key version is 1. + + Kerberos5 + enabling services + -Master key entered. BEWARE! -Previous or default values are in [brackets] , -enter return to leave the same, or new value. + The first step in configuring a server to use + Kerberos authentication is to + ensure that it has the correct configuration in + /etc/krb5.conf. The version from the + KDC can be used as-is, or it can be + regenerated on the new system. + + Next, create /etc/krb5.keytab on the + server. This is the main part of Kerberizing a + service — it corresponds to generating a secret shared + between the service and the KDC. The + secret is a cryptographic key, stored in a + keytab. The keytab contains the server's host + key, which allows it and the KDC to verify + each others' identity. It must be transmitted to the server + in a secure fashion, as the security of the server can be + broken if the key is made public. Typically, the + keytab is generated on an administrator's + trusted machine using kadmin, then securely + transferred to the server, e.g., with &man.scp.1;; it can also + be created directly on the server if that is consistent with + the desired security policy. It is very important that the + keytab is transmitted to the server in a secure fashion: if + the key is known by some other party, that party can + impersonate any user to the server! Using + kadmin on the server directly is + convenient, because the entry for the host principal in the + KDC database is also created using + kadmin. + + Of course, kadmin is a kerberized + service; a Kerberos ticket is + needed to authenticate to the network service, but to ensure + that the user running kadmin is actually + present (and their session has not been hijacked), + kadmin will prompt for the password to get + a fresh ticket. The principal authenticating to the kadmin + service must be permitted to use the kadmin + interface, as specified in kadmind.acl. + See the section titled Remote administration in + info heimdal for details on designing + access control lists. Instead of enabling remote + kadmin access, the administrator could + securely connect to the KDC via the local + console or &man.ssh.1;, and perform administration locally + using kadmin -l. + + After installing /etc/krb5.conf, + use add --random-key in + kadmin. This adds the server's host + principal to the database, but does not extract a copy of the + host principal key to a keytab. 
To generate the keytab, use + ext to extract the server's host principal + key to its own keytab: -Principal name: jane -Instance: + &prompt.root; kadmin +kadmin> add --random-key host/myserver.example.org +Max ticket life [unlimited]: +Max renewable life [unlimited]: +Principal expiration time [never]: +Password expiration time [never]: +Attributes []: +kadmin> ext_keytab host/myserver.example.org +kadmin> exit -<Not found>, Create [y] ? y + Note that ext_keytab stores the + extracted key in /etc/krb5.keytab by + default. This is good when being run on the server being + kerberized, but the --keytab + path/to/file argument + should be used when the keytab is being extracted + elsewhere: -Principal: jane, Instance: , kdc_key_ver: 1 -New Password: <---- enter a secure password here -Verifying password + &prompt.root; kadmin +kadmin> ext_keytab --keytab=/tmp/example.keytab host/myserver.example.org +kadmin> exit -New Password: <---- re-enter the password here -Principal's new key version = 1 -Expiration date (enter yyyy-mm-dd) [ 2000-01-01 ] ? -Max ticket lifetime (*5 minutes) [ 255 ] ? -Attributes [ 0 ] ? -Edit O.K. -Principal name: <---- null entry here will cause an exit + The keytab can then be securely copied to the server + using &man.scp.1; or a removable media. Be sure to specify a + non-default keytab name to avoid inserting unneeded keys into + the system's keytab. + + At this point, the server can read encrypted messages from + the KDC using its shared key, stored in + krb5.keytab. It is now ready for the + Kerberos-using services to be + enabled. One of the most common such services is + &man.sshd.8;, which supports + Kerberos via the + GSS-API. In + /etc/ssh/sshd_config, add the + line: + + GSSAPIAuthentication yes + + After making this change, &man.sshd.8; must be restared + for the new configuration to take effect: + service sshd restart. - Testing It All Out - - First we have to start the Kerberos daemons. Note that if you - have correctly edited your /etc/rc.conf then this - will happen automatically when you reboot. This is only necessary on - the Kerberos server. Kerberos clients will automatically get what - they need from the /etc/kerberosIV - directory. - - &prompt.root; kerberos & -Kerberos server starting -Sleep forever on error -Log file is /var/log/kerberos.log -Current Kerberos master key version is 1. - -Master key entered. BEWARE! - -Current Kerberos master key version is 1 -Local realm: EXAMPLE.COM -&prompt.root; kadmind -n & -KADM Server KADM0.0A initializing -Please do not use 'kill -9' to kill this job, use a -regular kill instead - -Current Kerberos master key version is 1. - -Master key entered. BEWARE! - - Now we can try using the kinit command to get a - ticket for the ID jane that we created - above: - - &prompt.user; kinit jane -MIT Project Athena (grunt.example.com) -Kerberos Initialization for "jane" -Password: + Configuring a Client to Use + <application>Kerberos</application> - Try listing the tokens using klist to see if we - really have them: + + Kerberos5 + configure clients + - &prompt.user; klist -Ticket file: /tmp/tkt245 -Principal: jane@EXAMPLE.COM + As it was for the server, the client requires + configuration in /etc/krb5.conf. Copy + the file in place (securely) or re-enter it as needed. + + Test the client by using kinit, + klist, and kdestroy from + the client to obtain, show, and then delete a ticket for an + existing principal. Kerberos + applications should also be able to connect to + Kerberos enabled servers. 
If that + does not work but obtaining a ticket does, the problem is + likely with the server and not with the client or the + KDC. In the case of kerberized + &man.ssh.1;, GSS-API is disabled by + default, so test using ssh -o + GSSAPIAuthentication=yes + hostname. + + When testing a Kerberized application, try using a packet + sniffer such as tcpdump to confirm that no + sensitive information is sent in the clear. + + Various Kerberos client + applications are available. With the advent of a bridge so + that applications using SASL for + authentication can use GSS-API mechanisms + as well, large classes of client applications can use + Kerberos for authentication, from + Jabber clients to IMAP clients. + + + .k5login + - Issued Expires Principal -Apr 30 11:23:22 Apr 30 19:23:22 krbtgt.EXAMPLE.COM@EXAMPLE.COM + + .k5users + + + Users within a realm typically have their + Kerberos principal mapped to a + local user account. Occasionally, one needs to grant access + to a local user account to someone who does not have a + matching Kerberos principal. For + example, tillman@EXAMPLE.ORG may need + access to the local user account webdevelopers. Other + principals may also need access to that local account. + + The .k5login and + .k5users files, placed in a user's home + directory, can be used to solve this problem. For example, if + the following .k5login is placed in the + home directory of webdevelopers, both principals + listed will have access to that account without requiring a + shared password.: - Now try changing the password using &man.passwd.1; to - check if the kpasswd daemon can get - authorization to the Kerberos database: + tillman@example.org +jdoe@example.org - &prompt.user; passwd -realm EXAMPLE.COM -Old password for jane: -New Password for jane: -Verifying password -New Password for jane: -Password changed. + Refer to &man.ksu.1; for more information about + .k5users. - Adding <command>su</command> Privileges + <acronym>MIT</acronym> Differences - Kerberos allows us to give each user - who needs root privileges their own - separate &man.su.1; password. - We could now add an ID which is authorized to - &man.su.1; to root. This is - controlled by having an instance of root - associated with a principal. Using kdb_edit - we can create the entry jane.root in the - Kerberos database: - - &prompt.root; kdb_edit -Opening database... - -Enter Kerberos master key: - -Current Kerberos master key version is 1. - -Master key entered. BEWARE! -Previous or default values are in [brackets] , -enter return to leave the same, or new value. - -Principal name: jane -Instance: root - -<Not found>, Create [y] ? y - -Principal: jane, Instance: root, kdc_key_ver: 1 -New Password: <---- enter a SECURE password here -Verifying password - -New Password: <---- re-enter the password here - -Principal's new key version = 1 -Expiration date (enter yyyy-mm-dd) [ 2000-01-01 ] ? -Max ticket lifetime (*5 minutes) [ 255 ] ? 12 <--- Keep this short! -Attributes [ 0 ] ? -Edit O.K. 
-Principal name: <---- null entry here will cause an exit - - Now try getting tokens for it to make sure it works: - - &prompt.root; kinit jane.root -MIT Project Athena (grunt.example.com) -Kerberos Initialization for "jane.root" -Password: - - Now we need to add the user to root's - .klogin file: - - &prompt.root; cat /root/.klogin -jane.root@EXAMPLE.COM - - Now try doing the &man.su.1;: - - &prompt.user; su -Password: - - and take a look at what tokens we have: - - &prompt.root; klist -Ticket file: /tmp/tkt_root_245 -Principal: jane.root@EXAMPLE.COM - - Issued Expires Principal -May 2 20:43:12 May 3 04:43:12 krbtgt.EXAMPLE.COM@EXAMPLE.COM + The major difference between the MIT + and Heimdal implementations is that kadmin + has a different, but equivalent, set of commands and uses a + different protocol. If the KDC is + MIT, the Heimdal version of + kadmin cannot be used to administer the + KDC remotely, and vice versa. + + Client applications may also use slightly different + command line options to accomplish the same tasks. Following + the instructions at http://web.mit.edu/Kerberos/www/ + is recommended. Be careful of path issues: the + MIT port installs into + /usr/local/ by default, and the &os; + system applications run instead of the + MIT versions if PATH lists + the system directories first. + + When using MIT Kerberos as a KDC on + &os;, the following edits should also be made to + rc.conf: + + kerberos5_server="/usr/local/sbin/krb5kdc" +kadmind5_server="/usr/local/sbin/kadmind" +kerberos5_server_flags="" +kerberos5_server_enable="YES" +kadmind5_server_enable="YES" - Using Other Commands + <application>Kerberos</application> Tips, Tricks, and + Troubleshooting + + When configuring and troubleshooting + Kerberos, keep the following points + in mind: - In an earlier example, we created a principal called - jane with an instance root. - This was based on a user with the same name as the principal, and this - is a Kerberos default; that a - <principal>.<instance> of the form - <username>.root will allow - that <username> to &man.su.1; to - root if the necessary entries are in the - .klogin file in root's - home directory: - - &prompt.root; cat /root/.klogin -jane.root@EXAMPLE.COM - - Likewise, if a user has in their own home directory lines of the - form: - - &prompt.user; cat ~/.klogin -jane@EXAMPLE.COM -jack@EXAMPLE.COM - - This allows anyone in the EXAMPLE.COM realm - who has authenticated themselves as jane or - jack (via kinit, see above) - to access to jane's - account or files on this system (grunt) via - &man.rlogin.1;, &man.rsh.1; or - &man.rcp.1;. - - For example, jane now logs into another system using - Kerberos: - - &prompt.user; kinit -MIT Project Athena (grunt.example.com) -Password: -&prompt.user; rlogin grunt -Last login: Mon May 1 21:14:47 from grumble -Copyright (c) 1980, 1983, 1986, 1988, 1990, 1991, 1993, 1994 - The Regents of the University of California. All rights reserved. - -FreeBSD BUILT-19950429 (GR386) #0: Sat Apr 29 17:50:09 SAT 1995 - - Or jack logs into jane's account on the same machine - (jane having - set up the .klogin file as above, and the person - in charge of Kerberos having set up principal - jack with a null instance): - - &prompt.user; kinit -&prompt.user; rlogin grunt -l jane -MIT Project Athena (grunt.example.com) -Password: -Last login: Mon May 1 21:16:55 from grumble -Copyright (c) 1980, 1983, 1986, 1988, 1990, 1991, 1993, 1994 - The Regents of the University of California. All rights reserved. 
-FreeBSD BUILT-19950429 (GR386) #0: Sat Apr 29 17:50:09 SAT 1995 - - - - - <application>Kerberos5</application> - - TillmanHodgsonContributed by - - - MarkMurrayBased on a contribution by - - + + + When using either Heimdal or MIT + Kerberos from ports, ensure + that the PATH lists the port's versions of + the client applications before the system versions. + - + + If all the computers in the realm do not have + synchronized time settings, authentication may fail. + describes how to synchronize + clocks using NTP. + - Every &os; release beyond &os;-5.1 includes support - only for Kerberos5. Hence - Kerberos5 is the only version - included, and its configuration is similar in many aspects - to that of KerberosIV. The following - information only applies to - Kerberos5 in post &os;-5.0 - releases. Users who wish to use the - KerberosIV package may install the - security/krb4 port. - - Kerberos is a network add-on - system/protocol that allows users to authenticate themselves - through the services of a secure server. Services such as remote - login, remote copy, secure inter-system file copying and other - high-risk tasks are made considerably safer and more - controllable. - - Kerberos can be described as an - identity-verifying proxy system. It can also be described as a - trusted third-party authentication system. - Kerberos provides only one - function — the secure authentication of users on the network. - It does not provide authorization functions (what users are - allowed to do) or auditing functions (what those users did). - After a client and server have used - Kerberos to prove their identity, they - can also encrypt all of their communications to assure privacy - and data integrity as they go about their business. + + If the hostname is changed, the host/ principal must be + changed and the keytab updated. This also applies to + special keytab entries like the HTTP/ principal used for + Apache's www/mod_auth_kerb. + - Therefore it is highly recommended that - Kerberos be used with other security - methods which provide authorization and audit services. + + All hosts in the realm must be both forward and + reverse resolvable in DNS or, at a + minimum, exist in /etc/hosts. CNAMEs + will work, but the A and PTR records must be correct and + in place. The error message for unresolvable hosts is not + intuitive: Kerberos5 refuses authentication + because Read req failed: Key table entry not + found. + - The following instructions can be used as a guide on how to set - up Kerberos as distributed for &os;. - However, you should refer to the relevant manual pages for a complete - description. + + Some operating systems that act as clients to the + KDC do not set the permissions for + ksu to be setuid root. This means that + ksu does not work. This is a + permissions problem, not a KDC + error. + - For purposes of demonstrating a Kerberos - installation, the various name spaces will be handled as follows: + + With MIT + Kerberos, to allow a principal + to have a ticket life longer than the default lifetime of + ten hours, use modify_principal at the + &man.kadmin.8; prompt to change the + maxlife of both the principal in + question and the + krbtgt + principal. The principal can then use + kinit -l to request a ticket with a + longer lifetime. + - - - The DNS domain (zone) - will be example.org. 
- + + When running a packet sniffer on the + KDC to aid in troubleshooting while + running kinit from a workstation, the + Ticket Granting Ticket (TGT) is sent + immediately, even before the password is typed. This is + because the Kerberos server + freely transmits a TGT to any + unauthorized request. However, every + TGT is encrypted in a key derived from + the user's password. When a user types their password, it + is not sent to the KDC, it is instead + used to decrypt the TGT that + kinit already obtained. If the + decryption process results in a valid ticket with a valid + time stamp, the user has valid + Kerberos credentials. These + credentials include a session key for establishing secure + communications with the + Kerberos server in the future, + as well as the actual TGT, which is + encrypted with the Kerberos + server's own key. This second layer of encryption allows + the Kerberos server to verify + the authenticity of each TGT. + - - The Kerberos realm will be - EXAMPLE.ORG. - - + + Host principals can have a longer ticket lifetime. If + the user principal has a lifetime of a week but the host + being connected to has a lifetime of nine hours, the user + cache will have an expired host principal and the ticket + cache will not work as expected. + - - Please use real domain names when setting up - Kerberos even if you intend to run - it internally. This avoids DNS problems - and assures inter-operation with other - Kerberos realms. - + + When setting up krb5.dict to + prevent specific bad passwords from being used as + described in &man.kadmind.8;, remember that it only + applies to principals that have a password policy assigned + to them. The format used in + krb5.dict is one string per line. + Creating a symbolic link to + /usr/share/dict/words might be + useful. + + + - History + Mitigating <application>Kerberos</application> + Limitations + Kerberos5 - history + limitations and shortcomings - Kerberos was created by - MIT as a solution to network security problems. - The Kerberos protocol uses strong - cryptography so that a client can prove its identity to a server - (and vice versa) across an insecure network connection. - - Kerberos is both the name of a - network authentication protocol and an adjective to describe - programs that implement the program - (Kerberos telnet, for example). The - current version of the protocol is version 5, described in - RFC 1510. - - Several free implementations of this protocol are available, - covering a wide range of operating systems. The Massachusetts - Institute of Technology (MIT), where - Kerberos was originally developed, - continues to develop their Kerberos - package. It is commonly used in the US - as a cryptography product, as such it - has historically been affected by US export - regulations. The MIT - Kerberos is available as a port - (security/krb5). Heimdal - Kerberos is another version 5 - implementation, and was explicitly developed outside of the - US to avoid export - regulations (and is thus often included in non-commercial &unix; - variants). The Heimdal Kerberos - distribution is available as a port - (security/heimdal), and a - minimal installation of it is included in the base &os; - install. - - In order to reach the widest audience, these instructions assume - the use of the Heimdal distribution included in &os;. - + Since Kerberos is an all or + nothing approach, every service enabled on the network must + either be modified to work with + Kerberos or be otherwise secured + against network attacks. 
This is to prevent user credentials + from being stolen and re-used. An example is when + Kerberos is enabled on all remote + shells but the non-Kerberized POP3 mail + server sends passwords in plain text. + + The KDC is a single point of failure. + By design, the KDC must be as secure as its + master password database. The KDC should + have absolutely no other services running on it and should be + physically secure. The danger is high because + Kerberos stores all passwords + encrypted with the same master key which is stored as a file + on the KDC. + + A compromised master key is not quite as bad as one might + fear. The master key is only used to encrypt the + Kerberos database and as a seed for + the random number generator. As long as access to the + KDC is secure, an attacker cannot do much + with the master key. + + If the KDC is unavailable, network + services are unusable as authentication cannot be performed. + This can be alleviated with a single master + KDC and one or more slaves, and with + careful implementation of secondary or fall-back + authentication using PAM. + + Kerberos allows users, hosts + and services to authenticate between themselves. It does not + have a mechanism to authenticate the + KDC to the users, hosts, or services. This + means that a trojanned kinit could record + all user names and passwords. File system integrity checking + tools like security/tripwire can + alleviate this. - Setting up a Heimdal <acronym>KDC</acronym> + Resources and Further Information + Kerberos5 - Key Distribution Center + external resources - The Key Distribution Center (KDC) is the - centralized authentication service that - Kerberos provides — it is the - computer that issues Kerberos tickets. - The KDC is considered trusted by - all other computers in the Kerberos - realm, and thus has heightened security concerns. - - Note that while running the Kerberos - server requires very few computing resources, a dedicated machine - acting only as a KDC is recommended for security - reasons. - - To begin setting up a KDC, ensure that your - /etc/rc.conf file contains the correct - settings to act as a KDC (you may need to adjust - paths to reflect your own system): - - kerberos5_server_enable="YES" -kadmind5_server_enable="YES" -kerberos_stash="YES" - - - The is only available in - &os; 4.X. - - - Next we will set up your Kerberos - config file, /etc/krb5.conf: - - [libdefaults] - default_realm = EXAMPLE.ORG -[realms] - EXAMPLE.ORG = { - kdc = kerberos.example.org - admin_server = kerberos.example.org - } -[domain_realm] - .example.org = EXAMPLE.ORG - - Note that this /etc/krb5.conf file implies - that your KDC will have the fully-qualified - hostname of kerberos.example.org. - You will need to add a CNAME (alias) entry to your zone file to - accomplish this if your KDC has a different - hostname. - - - For large networks with a properly configured - BIND DNS server, the - above example could be trimmed to: - - [libdefaults] - default_realm = EXAMPLE.ORG - - With the following lines being appended to the - example.org zonefile: - - _kerberos._udp IN SRV 01 00 88 kerberos.example.org. -_kerberos._tcp IN SRV 01 00 88 kerberos.example.org. -_kpasswd._udp IN SRV 01 00 464 kerberos.example.org. -_kerberos-adm._tcp IN SRV 01 00 749 kerberos.example.org. -_kerberos IN TXT EXAMPLE.ORG - - - For clients to be able to find the - Kerberos services, you - must have either a fully configured - /etc/krb5.conf or a miminally configured - /etc/krb5.conf and a - properly configured DNS server. 
- - - Next we will create the Kerberos - database. This database contains the keys of all principals encrypted - with a master password. You are not - required to remember this password, it will be stored in a file - (/var/heimdal/m-key). To create the master - key, run kstash and enter a password. - - Once the master key has been created, you can initialize the - database using the kadmin program with the - -l option (standing for local). - This option instructs kadmin to modify the - database files directly rather than going through the - kadmind network service. This handles the - chicken-and-egg problem of trying to connect to the database - before it is created. Once you have the kadmin - prompt, use the init command to create your - realms initial database. - - Lastly, while still in kadmin, create your - first principal using the add command. Stick - to the defaults options for the principal for now, you can always - change them later with the modify command. - Note that you can use the ? command at any - prompt to see the available options. - - A sample database creation session is shown below: - - &prompt.root; kstash -Master key: xxxxxxxx -Verifying password - Master key: xxxxxxxx - -&prompt.root; kadmin -l -kadmin> init EXAMPLE.ORG -Realm max ticket life [unlimited]: -kadmin> add tillman -Max ticket life [unlimited]: -Max renewable life [unlimited]: -Attributes []: -Password: xxxxxxxx -Verifying password - Password: xxxxxxxx - - Now it is time to start up the KDC services. - Run /etc/rc.d/kerberos start and - /etc/rc.d/kadmind start to bring up the - services. Note that you will not have any kerberized daemons running - at this point but you should be able to confirm the that the - KDC is functioning by obtaining and listing a - ticket for the principal (user) that you just created from the - command-line of the KDC itself: - - &prompt.user; k5init tillman -tillman@EXAMPLE.ORG's Password: - -&prompt.user; k5list -Credentials cache: FILE:/tmp/krb5cc_500 - Principal: tillman@EXAMPLE.ORG - - Issued Expires Principal -Aug 27 15:37:58 Aug 28 01:37:58 krbtgt/EXAMPLE.ORG@EXAMPLE.ORG - - - - - <application>Kerberos</application> enabling a server with - Heimdal services - - - Kerberos5 - enabling services - - - First, we need a copy of the Kerberos - configuration file, /etc/krb5.conf. To do - so, simply copy it over to the client computer from the - KDC in a secure fashion (using network utilities, - such as &man.scp.1;, or physically via a - floppy disk). - - Next you need a /etc/krb5.keytab file. - This is the major difference between a server providing - Kerberos enabled daemons and a - workstation — the server must have a - keytab file. This file - contains the servers host key, which allows it and the - KDC to verify each others identity. It - must be transmitted to the server in a secure fashion, as the - security of the server can be broken if the key is made public. - This explicitly means that transferring it via a clear text - channel, such as FTP, is a very bad idea. - - Typically, you transfer to the keytab - to the server using the kadmin program. - This is handy because you also need to create the host principal - (the KDC end of the - krb5.keytab) using - kadmin. - - Note that you must have already obtained a ticket and that this - ticket must be allowed to use the kadmin - interface in the kadmind.acl. See the section - titled Remote administration in the Heimdal info - pages (info heimdal) for details on designing - access control lists. 
If you do not want to enable remote - kadmin access, you can simply securely connect - to the KDC (via local console, - &man.ssh.1; or Kerberos - &man.telnet.1;) and perform administration locally - using kadmin -l. - - After installing the /etc/krb5.conf file, - you can use kadmin from the - Kerberos server. The - add --random-key command will let you add the - servers host principal, and the ext command - will allow you to extract the servers host principal to its own - keytab. For example: - - &prompt.root; kadmin -kadmin> add --random-key host/myserver.example.org -Max ticket life [unlimited]: -Max renewable life [unlimited]: -Attributes []: -kadmin> ext host/myserver.example.org -kadmin> exit - - Note that the ext command (short for - extract) stores the extracted key in - /etc/krb5.keytab by default. - - If you do not have kadmind running on the - KDC (possibly for security reasons) and thus - do not have access to kadmin remotely, you - can add the host principal - (host/myserver.EXAMPLE.ORG) directly on the - KDC and then extract it to a temporary file - (to avoid over-writing the /etc/krb5.keytab - on the KDC) using something like this: - - &prompt.root; kadmin -kadmin> ext --keytab=/tmp/example.keytab host/myserver.example.org -kadmin> exit - - You can then securely copy the keytab to the server - computer (using scp or a floppy, for - example). Be sure to specify a non-default keytab name - to avoid over-writing the keytab on the - KDC. - - At this point your server can communicate with the - KDC (due to its krb5.conf - file) and it can prove its own identity (due to the - krb5.keytab file). It is now ready for - you to enable some Kerberos services. - For this example we will enable the telnet - service by putting a line like this into your - /etc/inetd.conf and then restarting the - &man.inetd.8; service with - /etc/rc.d/inetd restart: - - telnet stream tcp nowait root /usr/libexec/telnetd telnetd -a user - - The critical bit is that the -a - (for authentication) type is set to user. Consult the - &man.telnetd.8; manual page for more details. - - - - - <application>Kerberos</application> enabling a client with Heimdal - - - Kerberos5 - configure clients - - - Setting up a client computer is almost trivially easy. As - far as Kerberos configuration goes, - you only need the Kerberos - configuration file, located at /etc/krb5.conf. - Simply securely copy it over to the client computer from the - KDC. - - Test your client computer by attempting to use - kinit, klist, and - kdestroy from the client to obtain, show, and - then delete a ticket for the principal you created above. You - should also be able to use Kerberos - applications to connect to Kerberos - enabled servers, though if that does not work and obtaining a - ticket does the problem is likely with the server and not with - the client or the KDC. - - When testing an application like telnet, - try using a packet sniffer (such as &man.tcpdump.1;) - to confirm that your password is not sent in the clear. Try - using telnet with the -x - option, which encrypts the entire data stream (similar to - ssh). - - The core Kerberos client applications - (traditionally named kinit, - klist, kdestroy, and - kpasswd) are installed in - the base &os; install. Note that &os; versions prior to 5.0 - renamed them to k5init, - k5list, k5destroy, - k5passwd, and k5stash - (though it is typically only used once). - - Various non-core Kerberos client - applications are also installed by default. 
This is where the - minimal nature of the base Heimdal installation is - felt: telnet is the only - Kerberos enabled service. - - The Heimdal port adds some of the missing client applications: - Kerberos enabled versions of - ftp, rsh, - rcp, rlogin, and a few - other less common programs. The MIT port also - contains a full suite of Kerberos - client applications. - - - - - User configuration files: <filename>.k5login</filename> and <filename>.k5users</filename> - - - .k5login - - - - .k5users - - - Users within a realm typically have their - Kerberos principal (such as - tillman@EXAMPLE.ORG) mapped to a local - user account (such as a local account named - tillman). Client applications such as - telnet usually do not require a user name - or a principal. - - Occasionally, however, you want to grant access to a local - user account to someone who does not have a matching - Kerberos principal. For example, - tillman@EXAMPLE.ORG may need access to the - local user account webdevelopers. Other - principals may also need access to that local account. - - The .k5login and - .k5users files, placed in a users home - directory, can be used similar to a powerful combination of - .hosts and .rhosts, - solving this problem. For example, if a - .k5login with the following - contents: - - tillman@example.org -jdoe@example.org - - Were to be placed into the home directory of the local user - webdevelopers then both principals listed - would have access to that account without requiring a shared - password. - - Reading the manual pages for these commands is recommended. - Note that the ksu manual page covers - .k5users. - - - - - <application>Kerberos</application> Tips, Tricks, and Troubleshooting - - - Kerberos5 - troubleshooting - - - - - When using either the Heimdal or MIT - Kerberos ports ensure that your - PATH environment variable lists the - Kerberos versions of the client - applications before the system versions. - - - - Do all the computers in your realm have synchronized - time settings? If not, authentication may fail. - describes how to synchronize - clocks using NTP. - - - - MIT and Heimdal inter-operate nicely. - Except for kadmin, the protocol for - which is not standardized. - - - - If you change your hostname, you also need to change your - host/ principal and update your keytab. - This also applies to special keytab entries like the - www/ principal used for Apache's - www/mod_auth_kerb. - - - - All hosts in your realm must be resolvable (both forwards - and reverse) in DNS (or - /etc/hosts as a minimum). CNAMEs - will work, but the A and PTR records must be correct and in - place. The error message is not very intuitive: - Kerberos5 refuses authentication because Read req - failed: Key table entry not found. - - - - Some operating systems that may being acting as clients - to your KDC do not set the permissions - for ksu to be setuid - root. This means that - ksu does not work, which is a good - security idea but annoying. This is not a - KDC error. - - - - With MIT - Kerberos, if you want to allow a - principal to have a ticket life longer than the default ten - hours, you must use modify_principal in - kadmin to change the maxlife of both the - principal in question and the krbtgt - principal. Then the principal can use the - -l option with kinit - to request a ticket with a longer lifetime. 
- - - - If you run a packet sniffer on your - KDC to add in troubleshooting and then - run kinit from a workstation, you will - notice that your TGT is sent - immediately upon running kinit — - even before you type your password! The explanation is - that the Kerberos server freely - transmits a TGT (Ticket Granting - Ticket) to any unauthorized request; however, every - TGT is encrypted in a key derived from - the user's password. Therefore, when a user types their - password it is not being sent to the KDC, - it is being used to decrypt the TGT that - kinit already obtained. If the decryption - process results in a valid ticket with a valid time stamp, - the user has valid Kerberos - credentials. These credentials include a session key for - establishing secure communications with the - Kerberos server in the future, as - well as the actual ticket-granting ticket, which is actually - encrypted with the Kerberos - server's own key. This second layer of encryption is - unknown to the user, but it is what allows the - Kerberos server to verify - the authenticity of each TGT. - - - - If you want to use long ticket lifetimes (a week, for - example) and you are using OpenSSH - to connect to the machine where your ticket is stored, make - sure that Kerberos - is set to no - in your sshd_config or else your tickets - will be deleted when you log out. - - - - Remember that host principals can have a longer ticket - lifetime as well. If your user principal has a lifetime of a - week but the host you are connecting to has a lifetime of nine - hours, you will have an expired host principal in your cache - and the ticket cache will not work as expected. - - - - When setting up a krb5.dict file to - prevent specific bad passwords from being used (the manual page - for kadmind covers this briefly), remember - that it only applies to principals that have a password policy - assigned to them. The krb5.dict files - format is simple: one string per line. Creating a symbolic - link to /usr/share/dict/words might be - useful. - - - - - - - Differences with the <acronym>MIT</acronym> port - - The major difference between the MIT - and Heimdal installs relates to the kadmin - program which has a different (but equivalent) set of commands - and uses a different protocol. This has a large implications - if your KDC is MIT as you - will not be able to use the Heimdal kadmin - program to administer your KDC remotely - (or vice versa, for that matter). - - The client applications may also take slightly different - command line options to accomplish the same tasks. Following - the instructions on the MIT - Kerberos web site - (http://web.mit.edu/Kerberos/www/) - is recommended. Be careful of path issues: the - MIT port installs into - /usr/local/ by default, and the - normal system applications may be run instead - of MIT if your PATH - environment variable lists the system directories first. - - With the MIT - security/krb5 port - that is provided by &os;, be sure to read the - /usr/local/share/doc/krb5/README.FreeBSD - file installed by the port if you want to understand why logins - via telnetd and klogind - behave somewhat oddly. Most importantly, correcting the - incorrect permissions on cache file behavior - requires that the login.krb5 binary be used - for authentication so that it can properly change ownership for - the forwarded credentials. 
- - - - - Mitigating limitations found in <application>Kerberos</application> - - - Kerberos5 - limitations and shortcomings - - - - <application>Kerberos</application> is an all-or-nothing approach - - Every service enabled on the network must be modified to - work with Kerberos (or be otherwise - secured against network attacks) or else the users credentials - could be stolen and re-used. An example of this would be - Kerberos enabling all remote shells - (via rsh and telnet, for - example) but not converting the POP3 mail - server which sends passwords in plain text. - - - - - <application>Kerberos</application> is intended for single-user workstations - - In a multi-user environment, - Kerberos is less secure. - This is because it stores the tickets in the - /tmp directory, which is readable by all - users. If a user is sharing a computer with several other - people simultaneously (i.e. multi-user), it is possible that - the user's tickets can be stolen (copied) by another - user. - - This can be overcome with the -c - filename command-line option or (preferably) the - KRB5CCNAME environment variable, but this - is rarely done. In principal, storing the ticket in the users - home directory and using simple file permissions can mitigate - this problem. - - - - - The KDC is a single point of failure - - By design, the KDC must be as secure as - the master password database is contained on it. The - KDC should have absolutely no other - services running on it and should be physically secured. The - danger is high because Kerberos - stores all passwords encrypted with the same key (the - master key), which in turn is stored as a file - on the KDC. - - As a side note, a compromised master key is not quite as - bad as one might normally fear. The master key is only used - to encrypt the Kerberos database - and as a seed for the random number generator. As long as - access to your KDC is secure, an attacker - cannot do much with the master key. - - Additionally, if the KDC is unavailable - (perhaps due to a denial of service attack or network problems) - the network services are unusable as authentication can not be - performed, a recipe for a denial-of-service attack. This can - alleviated with multiple KDCs (a single - master and one or more slaves) and with careful implementation - of secondary or fall-back authentication - (PAM is excellent for this). - - - - - <application>Kerberos</application> Shortcomings - - Kerberos allows users, hosts - and services to authenticate between themselves. It does not - have a mechanism to authenticate the KDC - to the users, hosts or services. This means that a trojanned - kinit (for example) could record all user - names and passwords. Something like - security/tripwire or - other file system integrity checking tools can alleviate - this. - - - - - - Resources and further information - - - Kerberos5 - external resources - - - - - - The Kerberos FAQ + + + + The Kerberos + FAQ - Designing - an Authentication System: a Dialog in Four Scenes + Designing + an Authentication System: a Dialog in Four + Scenes - RFC 1510, - The Kerberos Network Authentication Service - (V5) + RFC + 4120, The Kerberos Network + Authentication Service (V5) - MIT - Kerberos home page + MIT + Kerberos home + page - Heimdal - Kerberos home page + Heimdal + Kerberos home + page - - + - OpenSSL + + OpenSSL + - TomRhodesWritten by: + TomRhodesWritten + by - + security OpenSSL - One feature that many users overlook is the - OpenSSL toolkit included - in &os;. 
OpenSSL provides an - encryption transport layer on top of the normal communications - layer; thus allowing it to be intertwined with many network - applications and services. - - Some uses of OpenSSL may include - encrypted authentication of mail clients, web based transactions - such as credit card payments and more. Many ports such as - www/apache13-ssl, and - mail/sylpheed-claws - will offer compilation support for building with + OpenSSL is an open source + implementation of the SSL and + TLS protocols. It provides an encryption + transport layer on top of the normal communications layer, + allowing it to be intertwined with many network applications and + services. + + The version of OpenSSL included + in &os; supports the Secure Sockets Layer v2/v3 (SSLv2/SSLv3) + and Transport Layer Security v1 (TLSv1) network security + protocols and can be used as a general cryptographic + library. + + OpenSSL is often used to encrypt + authentication of mail clients and to secure web based + transactions such as credit card payments. Some ports, such as + www/apache24 and + databases/postgresql91-server, include a + compile option for building with OpenSSL. - - In most cases the Ports Collection will attempt to build - the security/openssl port - unless the WITH_OPENSSL_BASE make variable - is explicitly set to yes. - + &os; provides two versions of + OpenSSL: one in the base system and + one in the Ports Collection. Users can choose which version to + use by default for other ports using the following knobs: - The version of OpenSSL included - in &os; supports Secure Sockets Layer v2/v3 (SSLv2/SSLv3), - Transport Layer Security v1 (TLSv1) network security protocols - and can be used as a general cryptographic library. + + + WITH_OPENSSL_PORT: when set, the port will use + OpenSSL from the + security/openssl port, even if the + version in the base system is up to date or newer. + - - While OpenSSL supports the - IDEA algorithm, it is disabled by default - due to United States patents. To use it, the license should - be reviewed and, if the restrictions are acceptable, the - MAKE_IDEA variable must be set in - make.conf. - + + WITH_OPENSSL_BASE: when set, the port will compile + against OpenSSL provided by the + base system. + + - One of the most common uses of - OpenSSL is to provide certificates for - use with software applications. These certificates ensure - that the credentials of the company or individual are valid - and not fraudulent. If the certificate in question has - not been verified by one of the several Certificate Authorities, - or CAs, a warning is usually produced. A - Certificate Authority is a company, such as VeriSign, which will - sign certificates in order to validate credentials of individuals - or companies. This process has a cost associated with it and - is definitely not a requirement for using certificates; however, - it can put some of the more paranoid users at ease. + Another common use of OpenSSL is + to provide certificates for use with software applications. + Certificates can be used to verify the credentials of a company + or individual. If a certificate has not been signed by an + external Certificate Authority + (CA), such as http://www.verisign.com, + the application that uses the certificate will produce a + warning. There is a cost associated with obtaining a signed + certificate and using a signed certificate is not mandatory as + certificates can be self-signed. However, using an external + authority will prevent warnings and can put users at + ease. 
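	Whether a given certificate is self-signed or issued by an
	external CA can be checked with
	&man.openssl.1;. For example, assuming a certificate file
	named new.crt, such as the one generated
	later in this section:

&prompt.user; openssl x509 -noout -subject -issuer -dates -in new.crt

	For a self-signed certificate, the subject and issuer fields
	will be identical.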
+ + This section demonstrates how to create and use certificates + on a &os; system. Refer to for an + example of how to create a CA for signing + one's own certificates. Generating Certificates @@ -2963,8 +1874,16 @@ certificate generation - To generate a certificate, the following command is - available: + To generate a certificate that will be signed by an + external CA, issue the following command + and input the information requested at the prompts. This + input information will be written to the certificate. At the + Common Name prompt, input the fully + qualified name for the system that will use the certificate. + If this name does not match the server, the application + verifying the certificate will issue a warning to the user, + rendering the verification provided by the certificate as + useless. &prompt.root; openssl req -new -nodes -out req.pem -keyout cert.pem Generating a 1024 bit RSA private key @@ -2979,114 +1898,134 @@ For some fields there will be a default value, If you enter '.', the field will be left blank. ----- -Country Name (2 letter code) [AU]:US -State or Province Name (full name) [Some-State]:PA -Locality Name (eg, city) []:Pittsburgh -Organization Name (eg, company) [Internet Widgits Pty Ltd]:My Company -Organizational Unit Name (eg, section) []:Systems Administrator -Common Name (eg, YOUR name) []:localhost.example.org -Email Address []:trhodes@FreeBSD.org +Country Name (2 letter code) [AU]:US +State or Province Name (full name) [Some-State]:PA +Locality Name (eg, city) []:Pittsburgh +Organization Name (eg, company) [Internet Widgits Pty Ltd]:My Company +Organizational Unit Name (eg, section) []:Systems Administrator +Common Name (eg, YOUR name) []:localhost.example.org +Email Address []:trhodes@FreeBSD.org Please enter the following 'extra' attributes to be sent with your certificate request -A challenge password []:SOME PASSWORD -An optional company name []:Another Name +A challenge password []:SOME PASSWORD +An optional company name []:Another Name + + Other options, such as the expire time and alternate + encryption algorithms, are available when creating a + certificate. A complete list of options is described in + &man.openssl.1;. + + This command will create two files in the current + directory. The certificate request, + req.pem, can be sent to a + CA who will validate the entered + credentials, sign the request, and return the signed + certificate. The second file, + cert.pem, is the private key for the + certificate and should be stored in a secure location. If + this falls in the hands of others, it can be used to + impersonate the user or the server. + + Alternately, if a signature from a CA + is not required, a self-signed certificate can be created. + First, generate the RSA key: + + &prompt.root; openssl dsaparam -rand -genkey -out myRSA.key 1024 +0 semi-random bytes loaded +Generating DSA parameters, 1024 bit long prime +This could take some time +.............+........+...........+...+....+........+.....+++++++++++++++++++++++++++++++++++++++++++++++++++* +..........+.+...........+....+........+.................+.+++++++++++++++++++++++++++++++++++++++++++++++++++* + + Next, generate the CA key. When + prompted, enter a passphrase between 4 to 1023 characters. + Remember this passphrase as it is needed whenever the key is + used to sign a certificate. 
+ + &prompt.root; openssl gendsa -des3 -out myca.key myRSA.key +Generating DSA key, 1024 bits +Enter PEM pass phrase: +Verifying - Enter PEM pass phrase: + + Use this key to create a self-signed certificate. When + prompted, enter the passphrase. Then follow the usual prompts + for creating a certificate: - Notice the response directly after the - Common Name prompt shows a domain name. - This prompt requires a server name to be entered for - verification purposes; placing anything but a domain name - would yield a useless certificate. Other options, for - instance expire time, alternate encryption algorithms, etc. - are available. A complete list may be obtained by viewing - the &man.openssl.1; manual page. - - Two files should now exist in - the directory in which the aforementioned command was issued. - The certificate request, req.pem, may be - sent to a certificate authority who will validate the credentials - that you entered, sign the request and return the certificate to - you. The second file created will be named cert.pem - and is the private key for the certificate and should be - protected at all costs; if this falls in the hands of others it - can be used to impersonate you (or your server). - - In cases where a signature from a CA is - not required, a self signed certificate can be created. First, - generate the RSA key: - - &prompt.root; openssl dsaparam -rand -genkey -out myRSA.key 1024 - - Next, generate the CA key: - - &prompt.root; openssl gendsa -des3 -out myca.key myRSA.key - - Use this key to create the certificate: - - &prompt.root; openssl req -new -x509 -days 365 -key myca.key -out new.crt - - Two new files should appear in the directory: a certificate - authority signature file, myca.key and the - certificate itself, new.crt. These should - be placed in a directory, preferably under - /etc, which is readable - only by root. Permissions of 0700 should be fine for this and - they can be set with the chmod - utility. + &prompt.root; openssl req -new -x509 -days 365 -key myca.key -out new.crt +Enter pass phrase for myca.key: +You are about to be asked to enter information that will be incorporated +into your certificate request. +What you are about to enter is what is called a Distinguished Name or a DN. +There are quite a few fields but you can leave some blank +For some fields there will be a default value, +If you enter '.', the field will be left blank. +----- +Country Name (2 letter code) [AU]:US +State or Province Name (full name) [Some-State]:PA +Locality Name (eg, city) []:Pittsburgh +Organization Name (eg, company) [Internet Widgits Pty Ltd]:My Company +Organizational Unit Name (eg, section) []:Systems Administrator +Common Name (e.g. server FQDN or YOUR name) []:localhost.example.org +Email Address []:trhodes@FreeBSD.org + + This will create two new files in the current directory: a + certificate authority signature file, + myca.key, and the certificate itself, + new.crt. These should be placed in a + directory, preferably under /etc, which + is readable only by root. Permissions of + 0700 are appropriate for these files and + can be set using chmod. - Using Certificates, an Example - - So what can these files do? A good use would be to - encrypt connections to the Sendmail - MTA. This would dissolve the use of clear - text authentication for users who send mail via the local - MTA. + Using Certificates + + One use for a certificate is to encrypt connections to the + Sendmail mail server in order to + prevent the use of clear text authentication. 
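Once the mail server has been configured as described below, the STARTTLS negotiation can also be exercised directly with the s_client subcommand of OpenSSL; this is a quick sketch, and the hostname should be replaced with the server being tested:

&prompt.root; openssl s_client -connect example.com:25 -starttls smtp

A successful handshake prints the server's certificate chain and the negotiated cipher suite; an immediate failure usually points at the certificate or key paths configured for the server.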
- This is not the best use in the world as some - MUAs will present the user with an - error if they have not installed the certificate locally. - Refer to the documentation included with the software for - more information on certificate installation. + Some mail clients will display an error if the user has + not installed a local copy of the certificate. Refer to the + documentation included with the software for more + information on certificate installation. - The following lines should be placed inside the - local .mc file: - - dnl SSL Options -define(`confCACERT_PATH',`/etc/certs')dnl -define(`confCACERT',`/etc/certs/new.crt')dnl -define(`confSERVER_CERT',`/etc/certs/new.crt')dnl -define(`confSERVER_KEY',`/etc/certs/myca.key')dnl -define(`confTLS_SRV_OPTIONS', `V')dnl - - Where /etc/certs/ - is the directory to be used for storing the certificate - and key files locally. The last few requirements are a rebuild - of the local .cf file. This is easily - achieved by typing make - install within the - /etc/mail - directory. Follow that up with make - restart which should start the - Sendmail daemon. - - If all went well there will be no error messages in the - /var/log/maillog file and - Sendmail will show up in the process - list. + In &os; 10.0-RELEASE and above, it is possible to create a + self-signed certificate for + Sendmail automatically. To enable + this, add the following lines to + /etc/rc.conf: + + sendmail_enable="YES" +sendmail_cert_create="YES" +sendmail_cert_cn="localhost.example.org" + + This will automatically create a self-signed certificate, + /etc/mail/certs/host.cert, a signing key, + /etc/mail/certs/host.key, and a + CA certificate, + /etc/mail/certs/cacert.pem. The + certificate will use the Common Name + specified in . After saving + the edits, restart Sendmail: + + &prompt.root; service sendmail restart + + If all went well, there will be no error messages in + /var/log/maillog. For a simple test, + connect to the mail server's listening port using + telnet: - For a simple test, simply connect to the mail server - using the &man.telnet.1; utility: - - &prompt.root; telnet example.com 25 + &prompt.root; telnet example.com 25 Trying 192.0.34.166... -Connected to example.com. +Connected to example.com. Escape character is '^]'. -220 example.com ESMTP Sendmail 8.12.10/8.12.10; Tue, 31 Aug 2004 03:41:22 -0400 (EDT) -ehlo example.com +220 example.com ESMTP Sendmail 8.14.7/8.14.7; Fri, 18 Apr 2014 11:50:32 -0400 (EDT) +ehlo example.com 250-example.com Hello example.com [192.0.34.166], pleased to meet you 250-ENHANCEDSTATUSCODES 250-PIPELINING @@ -3099,1311 +2038,787 @@ 250-DELIVERBY 250 HELP quit -221 2.0.0 example.com closing connection +221 2.0.0 example.com closing connection Connection closed by foreign host. - If the STARTTLS line appears in the output - then everything is working correctly. + If the STARTTLS line appears in the + output, everything is working correctly. - VPN over IPsec + + <acronym>VPN</acronym> over + <acronym>IPsec</acronym> + - NikClayton -
nik@FreeBSD.org
-
Written by
+ + + Nik + Clayton + + +
+ nik@FreeBSD.org +
+
+ Written by +
-
- + + + + Hiten M. + Pandya + + +
+ hmp@FreeBSD.org +
+
+ Written by +
+
+
- IPsec + IPsec - Creating a VPN between two networks, separated by the - Internet, using FreeBSD gateways. - - - Understanding IPsec - - Hiten M.Pandya -
hmp@FreeBSD.org
-
Written by
-
-
- - - - This section will guide you through the process of setting - up IPsec, and to use it in an environment which consists of - FreeBSD and µsoft.windows; 2000/XP - machines, to make them communicate securely. In order to set up - IPsec, it is necessary that you are familiar with the concepts - of building a custom kernel (see - ). - - IPsec is a protocol which sits on top - of the Internet Protocol (IP) layer. It allows two or more - hosts to communicate in a secure manner (hence the name). The - FreeBSD IPsec network stack is based on the - KAME implementation, - which has support for both protocol families, IPv4 and - IPv6. + Internet Protocol Security (IPsec) is a + set of protocols which sit on top of the Internet Protocol + (IP) layer. It allows two or more hosts to + communicate in a secure manner by authenticating and encrypting + each IP packet of a communication session. + The &os; IPsec network stack is based on the + http://www.kame.net/ + implementation and supports both IPv4 and + IPv6 sessions. - - FreeBSD 5.X contains a hardware - accelerated IPsec stack, known as Fast - IPsec, that was obtained from OpenBSD. It employs - cryptographic hardware (whenever possible) via the - &man.crypto.4; subsystem to optimize the performance of IPsec. - This subsystem is new, and does not support all the features - that are available in the KAME version of IPsec. However, in - order to enable hardware-accelerated IPsec, the following - kernel option has to be added to your kernel configuration - file: - - - kernel options - FAST_IPSEC - + + IPsec + ESP + - -options FAST_IPSEC # new IPsec (cannot define w/ IPSEC) - - - Note, that it is not currently possible to use the - Fast IPsec subsystem in lue with the KAME - implementation of IPsec. Consult the &man.fast.ipsec.4; - manual page for more information. + + IPsec + AH + - + IPsec is comprised of the following + sub-protocols: - - IPsec - ESP - + + + Encapsulated Security Payload + (ESP): this protocol + protects the IP packet data from third + party interference by encrypting the contents using + symmetric cryptography algorithms such as Blowfish and + 3DES. + - - IPsec - AH - + + Authentication Header + (AH)): this protocol + protects the IP packet header from third + party interference and spoofing by computing a cryptographic + checksum and hashing the IP packet + header fields with a secure hashing function. This is then + followed by an additional header that contains the hash, to + allow the information in the packet to be + authenticated. + - IPsec consists of two sub-protocols: + + IP Payload Compression Protocol + (IPComp): this protocol + tries to increase communication performance by compressing + the IP payload in order ro reduce the + amount of data sent. + + - - - Encapsulated Security Payload - (ESP), protects the IP packet data from third - party interference, by encrypting the contents using - symmetric cryptography algorithms (like Blowfish, - 3DES). - - - Authentication Header (AH), - protects the IP packet header from third party interference - and spoofing, by computing a cryptographic checksum and - hashing the IP packet header fields with a secure hashing - function. This is then followed by an additional header - that contains the hash, to allow the information in the - packet to be authenticated. - - + These protocols can either be used together or separately, + depending on the environment. - ESP and AH can - either be used together or separately, depending on the - environment. 
+ + VPN + - - VPN - + + virtual private network + VPN + - - virtual private network - VPN - + IPsec supports two modes of operation. + The first mode, Transport Mode, protects + communications between two hosts. The second mode, + Tunnel Mode, is used to build virtual + tunnels, commonly known as Virtual Private Networks + (VPNs). Consult &man.ipsec.4; for detailed + information on the IPsec subsystem in + &os;. + + To add IPsec support to the kernel, add + the following options to the custom kernel configuration file + and rebuild the kernel using the instructions in : - IPsec can either be used to directly encrypt the traffic - between two hosts (known as Transport - Mode); or to build virtual tunnels - between two subnets, which could be used for secure - communication between two corporate networks (known as - Tunnel Mode). The latter is more commonly - known as a Virtual Private Network (VPN). - The &man.ipsec.4; manual page should be consulted for detailed - information on the IPsec subsystem in FreeBSD. + + kernel options + IPSEC + - To add IPsec support to your kernel, add the following - options to your kernel configuration file: + options IPSEC #IP security +device crypto - - kernel options - IPSEC - + + kernel options + IPSEC_DEBUG + - - kernel options - IPSEC_ESP - + If IPsec debugging support is desired, + the following kernel option should also be added: - -options IPSEC #IP security -options IPSEC_ESP #IP security (crypto; define w/ IPSEC) - + options IPSEC_DEBUG #debug for IP security - - kernel options - IPSEC_DEBUG - - - If IPsec debugging support is desired, the following - kernel option should also be added: + This rest of this chapter demonstrates the process of + setting up an IPsec VPN + between a home network and a corporate network. In the example + scenario: - -options IPSEC_DEBUG #debug for IP security - -
+ + + Both sites are connected to the Internet through a + gateway that is running &os;. + - - The Problem + + The gateway on each network has at least one external + IP address. In this example, the + corporate LAN's external + IP address is 172.16.5.4 and the home + LAN's external IP + address is 192.168.1.12. + - There is no standard for what constitutes a VPN. VPNs can - be implemented using a number of different technologies, each of - which have their own strengths and weaknesses. This section - presents a scenario, and the strategies used for implementing a - VPN for this scenario. - + + The internal addresses of the two networks can be either + public or private IP addresses. However, + the address space must not collide. For example, both + networks cannot use 192.168.1.x. In this + example, the corporate LAN's internal + IP address is 10.246.38.1 and the home + LAN's internal IP + address is 10.0.0.5. + + - The Scenario: Two networks, connected to the Internet, to - behave as one - - - VPN - creating - - - The premise is as follows: + + Configuring a <acronym>VPN</acronym> on &os; - - - You have at least two sites - - - Both sites are using IP internally - - - Both sites are connected to the Internet, through a - gateway that is running FreeBSD. - - - The gateway on each network has at least one public IP - address. - - - The internal addresses of the two networks can be - public or private IP addresses, it does not matter. You can - be running NAT on the gateway machine if necessary. - - - The internal IP addresses of the two networks - do not collide. While I expect it is - theoretically possible to use a combination of VPN - technology and NAT to get this to work, I expect it to be a - configuration nightmare. - - - - If you find that you are trying to connect two networks, - both of which, internally, use the same private IP address range - (e.g. both of them use 192.168.1.x), then one of the networks will - have to be renumbered. - - The network topology might look something like this: - - - - - - - -Network #1 [ Internal Hosts ] Private Net, 192.168.1.2-254 - [ Win9x/NT/2K ] - [ UNIX ] - | - | - .---[fxp1]---. Private IP, 192.168.1.1 - | FreeBSD | - `---[fxp0]---' Public IP, A.B.C.D - | - | - -=-=- Internet -=-=- - | - | - .---[fxp0]---. Public IP, W.X.Y.Z - | FreeBSD | - `---[fxp1]---' Private IP, 192.168.2.1 - | - | -Network #2 [ Internal Hosts ] - [ Win9x/NT/2K ] Private Net, 192.168.2.2-254 - [ UNIX ] - - - - Notice the two public IP addresses. I will use the letters to - refer to them in the rest of this article. Anywhere you see those - letters in this article, replace them with your own public IP - addresses. Note also that internally, the two gateway - machines have .1 IP addresses, and that the two networks have - different private IP addresses (192.168.1.x and 192.168.2.x respectively). All the - machines on the private networks have been configured to use the - .1 machine as their default - gateway. - - The intention is that, from a network point of view, each - network should view the machines on the other network as though - they were directly attached the same router -- albeit a slightly - slow router with an occasional tendency to drop packets. - - This means that (for example), machine 192.168.1.20 should be able to run - - ping 192.168.2.34 - - and have it work, transparently. 
&windows; machines should - be able to see the machines on the other network, browse file - shares, and so on, in exactly the same way that they can browse - machines on the local network. - - And the whole thing has to be secure. This means that - traffic between the two networks has to be encrypted. - - Creating a VPN between these two networks is a multi-step - process. The stages are as follows: - - - - Create a virtual network link between the two - networks, across the Internet. Test it, using tools like - &man.ping.8;, to make sure it works. - - - - Apply security policies to ensure that traffic between - the two networks is transparently encrypted and decrypted as - necessary. Test this, using tools like &man.tcpdump.1;, to - ensure that traffic is encrypted. - - - - Configure additional software on the FreeBSD gateways, - to allow &windows; machines to see one another across the - VPN. - - - - - Step 1: Creating and testing a <quote>virtual</quote> - network link - - Suppose that you were logged in to the gateway machine on - network #1 (with public IP address A.B.C.D, private IP address 192.168.1.1), and you ran ping - 192.168.2.1, which is the private address of the machine - with IP address W.X.Y.Z. What - needs to happen in order for this to work? - - - - The gateway machine needs to know how to reach 192.168.2.1. In other words, it needs - to have a route to 192.168.2.1. - - - Private IP addresses, such as those in the 192.168.x range are not supposed to - appear on the Internet at large. Instead, each packet you - send to 192.168.2.1 will need - to be wrapped up inside another packet. This packet will need - to appear to be from A.B.C.D, - and it will have to be sent to W.X.Y.Z. This process is called - encapsulation. - - - Once this packet arrives at W.X.Y.Z it will need to - unencapsulated, and delivered to 192.168.2.1. - - + + + + Tom + Rhodes + + +
+ trhodes@FreeBSD.org +
+
+ Written by +
+
+
- You can think of this as requiring a tunnel - between the two networks. The two tunnel mouths are the IP - addresses A.B.C.D and W.X.Y.Z, and the tunnel must be told the - addresses of the private IP addresses that will be allowed to pass - through it. The tunnel is used to transfer traffic with private - IP addresses across the public Internet. - - This tunnel is created by using the generic interface, or - gif devices on FreeBSD. As you can - imagine, the gif interface on each - gateway host must be configured with four IP addresses; two for - the public IP addresses, and two for the private IP - addresses. - - Support for the gif device must be compiled in to the - &os; kernel on both machines. You can do this by adding the - line: - - device gif - - to the kernel configuration files on both machines, and - then compile, install, and reboot as normal. - - Configuring the tunnel is a two step process. First the - tunnel must be told what the outside (or public) IP addresses - are, using &man.gifconfig.8;. Then the private IP addresses must be - configured using &man.ifconfig.8;. + To begin, security/ipsec-tools must be + installed from the Ports Collection. This software provides a + number of applications which support the configuration. + + The next requirement is to create two &man.gif.4; + pseudo-devices which will be used to tunnel packets and allow + both networks to communicate properly. As root, run the following + commands, replacing internal and + external with the real IP + addresses of the internal and external interfaces of the two + gateways: + + &prompt.root; ifconfig gif0 create +&prompt.root; ifconfig gif0 internal1 internal2 +&prompt.root; ifconfig gif0 tunnel external1 external2 + + Verify the setup on each gateway, using + ifconfig. Here is the output from Gateway + 1: + + gif0: flags=8051 mtu 1280 +tunnel inet 172.16.5.4 --> 192.168.1.12 +inet6 fe80::2e0:81ff:fe02:5881%gif0 prefixlen 64 scopeid 0x6 +inet 10.246.38.1 --> 10.0.0.5 netmask 0xffffff00 + + Here is the output from Gateway 2: + + gif0: flags=8051 mtu 1280 +tunnel inet 192.168.1.12 --> 172.16.5.4 +inet 10.0.0.5 --> 10.246.38.1 netmask 0xffffff00 +inet6 fe80::250:bfff:fe3a:c1f%gif0 prefixlen 64 scopeid 0x4 + + Once complete, both internal IP + addresses should be reachable using &man.ping.8;: + + priv-net# ping 10.0.0.5 +PING 10.0.0.5 (10.0.0.5): 56 data bytes +64 bytes from 10.0.0.5: icmp_seq=0 ttl=64 time=42.786 ms +64 bytes from 10.0.0.5: icmp_seq=1 ttl=64 time=19.255 ms +64 bytes from 10.0.0.5: icmp_seq=2 ttl=64 time=20.440 ms +64 bytes from 10.0.0.5: icmp_seq=3 ttl=64 time=21.036 ms +--- 10.0.0.5 ping statistics --- +4 packets transmitted, 4 packets received, 0% packet loss +round-trip min/avg/max/stddev = 19.255/25.879/42.786/9.782 ms + +corp-net# ping 10.246.38.1 +PING 10.246.38.1 (10.246.38.1): 56 data bytes +64 bytes from 10.246.38.1: icmp_seq=0 ttl=64 time=28.106 ms +64 bytes from 10.246.38.1: icmp_seq=1 ttl=64 time=42.917 ms +64 bytes from 10.246.38.1: icmp_seq=2 ttl=64 time=127.525 ms +64 bytes from 10.246.38.1: icmp_seq=3 ttl=64 time=119.896 ms +64 bytes from 10.246.38.1: icmp_seq=4 ttl=64 time=154.524 ms +--- 10.246.38.1 ping statistics --- +5 packets transmitted, 5 packets received, 0% packet loss +round-trip min/avg/max/stddev = 28.106/94.594/154.524/49.814 ms + + As expected, both sides have the ability to send and + receive ICMP packets from the privately + configured addresses. 
Next, both gateways must be told how to + route packets in order to correctly send traffic from either + network. The following commands will achieve this + goal: + + &prompt.root; corp-net# route add 10.0.0.0 10.0.0.5 255.255.255.0 +&prompt.root; corp-net# route add net 10.0.0.0: gateway 10.0.0.5 +&prompt.root; priv-net# route add 10.246.38.0 10.246.38.1 255.255.255.0 +&prompt.root; priv-net# route add host 10.246.38.0: gateway 10.246.38.1 + + At this point, internal machines should be reachable from + each gateway as well as from machines behind the gateways. + Again, use &man.ping.8; to confirm: + + corp-net# ping 10.0.0.8 +PING 10.0.0.8 (10.0.0.8): 56 data bytes +64 bytes from 10.0.0.8: icmp_seq=0 ttl=63 time=92.391 ms +64 bytes from 10.0.0.8: icmp_seq=1 ttl=63 time=21.870 ms +64 bytes from 10.0.0.8: icmp_seq=2 ttl=63 time=198.022 ms +64 bytes from 10.0.0.8: icmp_seq=3 ttl=63 time=22.241 ms +64 bytes from 10.0.0.8: icmp_seq=4 ttl=63 time=174.705 ms +--- 10.0.0.8 ping statistics --- +5 packets transmitted, 5 packets received, 0% packet loss +round-trip min/avg/max/stddev = 21.870/101.846/198.022/74.001 ms + +priv-net# ping 10.246.38.107 +PING 10.246.38.1 (10.246.38.107): 56 data bytes +64 bytes from 10.246.38.107: icmp_seq=0 ttl=64 time=53.491 ms +64 bytes from 10.246.38.107: icmp_seq=1 ttl=64 time=23.395 ms +64 bytes from 10.246.38.107: icmp_seq=2 ttl=64 time=23.865 ms +64 bytes from 10.246.38.107: icmp_seq=3 ttl=64 time=21.145 ms +64 bytes from 10.246.38.107: icmp_seq=4 ttl=64 time=36.708 ms +--- 10.246.38.107 ping statistics --- +5 packets transmitted, 5 packets received, 0% packet loss +round-trip min/avg/max/stddev = 21.145/31.721/53.491/12.179 ms + + Setting up the tunnels is the easy part. Configuring a + secure link is a more in depth process. The following + configuration uses pre-shared (PSK) + RSA keys. Other than the + IP addresses, the + /usr/local/etc/racoon/racoon.conf on both + gateways will be identical and look similar to: + + path pre_shared_key "/usr/local/etc/racoon/psk.txt"; #location of pre-shared key file +log debug; #log verbosity setting: set to 'notify' when testing and debugging is complete + +padding # options are not to be changed +{ + maximum_length 20; + randomize off; + strict_check off; + exclusive_tail off; +} + +timer # timing options. change as needed +{ + counter 5; + interval 20 sec; + persend 1; +# natt_keepalive 15 sec; + phase1 30 sec; + phase2 15 sec; +} + +listen # address [port] that racoon will listen on +{ + isakmp 172.16.5.4 [500]; + isakmp_natt 172.16.5.4 [4500]; +} + +remote 192.168.1.12 [500] +{ + exchange_mode main,aggressive; + doi ipsec_doi; + situation identity_only; + my_identifier address 172.16.5.4; + peers_identifier address 192.168.1.12; + lifetime time 8 hour; + passive off; + proposal_check obey; +# nat_traversal off; + generate_policy off; + + proposal { + encryption_algorithm blowfish; + hash_algorithm md5; + authentication_method pre_shared_key; + lifetime time 30 sec; + dh_group 1; + } +} + +sainfo (address 10.246.38.0/24 any address 10.0.0.0/24 any) # address $network/$netmask $type address $network/$netmask $type ( $type being any or esp) +{ # $network must be the two internal networks you are joining. + pfs_group 1; + lifetime time 36000 sec; + encryption_algorithm blowfish,3des,des; + authentication_algorithm hmac_md5,hmac_sha1; + compression_algorithm deflate; +} + + For descriptions of each available option, refer to the + manual page for racoon.conf. 
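The racoon.conf listed above points at a pre-shared key file. As a sketch, /usr/local/etc/racoon/psk.txt on the corporate gateway contains a single line, the peer's external address followed by the shared secret; the secret shown here is a placeholder and must be replaced:

# peer address      shared secret
192.168.1.12        secretkey

On the home gateway the same secret is listed against 172.16.5.4. racoon requires this file to be readable only by root:

&prompt.root; chmod 0600 /usr/local/etc/racoon/psk.txt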
+ + The Security Policy Database (SPD) + needs to be configured so that &os; and + racoon are able to encrypt and + decrypt network traffic between the hosts. + + This can be achieved with a shell script, similar to the + following, on the corporate gateway. This file will be used + during system initialization and should be saved as + /usr/local/etc/racoon/setkey.conf. + + flush; +spdflush; +# To the home network +spdadd 10.246.38.0/24 10.0.0.0/24 any -P out ipsec esp/tunnel/172.16.5.4-192.168.1.12/use; +spdadd 10.0.0.0/24 10.246.38.0/24 any -P in ipsec esp/tunnel/192.168.1.12-172.16.5.4/use; + + Once in place, racoon may be + started on both gateways using the following command: + + &prompt.root; /usr/local/sbin/racoon -F -f /usr/local/etc/racoon/racoon.conf -l /var/log/racoon.log + + The output should be similar to the following: + + corp-net# /usr/local/sbin/racoon -F -f /usr/local/etc/racoon/racoon.conf +Foreground mode. +2006-01-30 01:35:47: INFO: begin Identity Protection mode. +2006-01-30 01:35:48: INFO: received Vendor ID: KAME/racoon +2006-01-30 01:35:55: INFO: received Vendor ID: KAME/racoon +2006-01-30 01:36:04: INFO: ISAKMP-SA established 172.16.5.4[500]-192.168.1.12[500] spi:623b9b3bd2492452:7deab82d54ff704a +2006-01-30 01:36:05: INFO: initiate new phase 2 negotiation: 172.16.5.4[0]192.168.1.12[0] +2006-01-30 01:36:09: INFO: IPsec-SA established: ESP/Tunnel 192.168.1.12[0]->172.16.5.4[0] spi=28496098(0x1b2d0e2) +2006-01-30 01:36:09: INFO: IPsec-SA established: ESP/Tunnel 172.16.5.4[0]->192.168.1.12[0] spi=47784998(0x2d92426) +2006-01-30 01:36:13: INFO: respond new phase 2 negotiation: 172.16.5.4[0]192.168.1.12[0] +2006-01-30 01:36:18: INFO: IPsec-SA established: ESP/Tunnel 192.168.1.12[0]->172.16.5.4[0] spi=124397467(0x76a279b) +2006-01-30 01:36:18: INFO: IPsec-SA established: ESP/Tunnel 172.16.5.4[0]->192.168.1.12[0] spi=175852902(0xa7b4d66) + + To ensure the tunnel is working properly, switch to + another console and use &man.tcpdump.1; to view network + traffic using the following command. Replace + em0 with the network interface card as + required: + + &prompt.root; tcpdump -i em0 host 172.16.5.4 and dst 192.168.1.12 + + Data similar to the following should appear on the + console. If not, there is an issue and debugging the + returned data will be required. + + 01:47:32.021683 IP corporatenetwork.com > 192.168.1.12.privatenetwork.com: ESP(spi=0x02acbf9f,seq=0xa) +01:47:33.022442 IP corporatenetwork.com > 192.168.1.12.privatenetwork.com: ESP(spi=0x02acbf9f,seq=0xb) +01:47:34.024218 IP corporatenetwork.com > 192.168.1.12.privatenetwork.com: ESP(spi=0x02acbf9f,seq=0xc) + + At this point, both networks should be available and seem + to be part of the same network. Most likely both networks are + protected by a firewall. To allow traffic to flow between + them, rules need to be added to pass packets. For the + &man.ipfw.8; firewall, add the following lines to the firewall + configuration file: + + ipfw add 00201 allow log esp from any to any +ipfw add 00202 allow log ah from any to any +ipfw add 00203 allow log ipencap from any to any +ipfw add 00204 allow log udp from any 500 to any - In &os; 5.X, the functionality provided by the - &man.gifconfig.8; utility has been merged into - &man.ifconfig.8;. - - On the gateway machine on network #1 you would run the - following two commands to configure the tunnel. 
- - gifconfig gif0 A.B.C.D W.X.Y.Z -ifconfig gif0 inet 192.168.1.1 192.168.2.1 netmask 0xffffffff - - - On the other gateway machine you run the same commands, - but with the order of the IP addresses reversed. - - gifconfig gif0 W.X.Y.Z A.B.C.D -ifconfig gif0 inet 192.168.2.1 192.168.1.1 netmask 0xffffffff - - - You can then run: - - gifconfig gif0 - - to see the configuration. For example, on the network #1 - gateway, you would see this: - - &prompt.root; gifconfig gif0 -gif0: flags=8011<UP,POINTTOPOINT,MULTICAST> mtu 1280 -inet 192.168.1.1 --> 192.168.2.1 netmask 0xffffffff -physical address inet A.B.C.D --> W.X.Y.Z - - - As you can see, a tunnel has been created between the - physical addresses A.B.C.D and - W.X.Y.Z, and the traffic allowed - through the tunnel is that between 192.168.1.1 and 192.168.2.1. - - This will also have added an entry to the routing table - on both machines, which you can examine with the command netstat -rn. - This output is from the gateway host on network #1. - - &prompt.root; netstat -rn -Routing tables - -Internet: -Destination Gateway Flags Refs Use Netif Expire -... -192.168.2.1 192.168.1.1 UH 0 0 gif0 -... - - - As the Flags value indicates, this is a - host route, which means that each gateway knows how to reach the - other gateway, but they do not know how to reach the rest of - their respective networks. That problem will be fixed - shortly. - - It is likely that you are running a firewall on both - machines. This will need to be circumvented for your VPN - traffic. You might want to allow all traffic between both - networks, or you might want to include firewall rules that - protect both ends of the VPN from one another. - - It greatly simplifies testing if you configure the - firewall to allow all traffic through the VPN. You can always - tighten things up later. If you are using &man.ipfw.8; on the - gateway machines then a command like - - ipfw add 1 allow ip from any to any via gif0 - - will allow all traffic between the two end points of the - VPN, without affecting your other firewall rules. Obviously - you will need to run this command on both gateway hosts. - - This is sufficient to allow each gateway machine to ping - the other. On 192.168.1.1, you - should be able to run - - ping 192.168.2.1 - - and get a response, and you should be able to do the same - thing on the other gateway machine. - - However, you will not be able to reach internal machines - on either network yet. This is because of the routing -- - although the gateway machines know how to reach one another, - they do not know how to reach the network behind each one. - - To solve this problem you must add a static route on each - gateway machine. The command to do this on the first gateway - would be: - - route add 192.168.2.0 192.168.2.1 netmask 0xffffff00 - - - This says In order to reach the hosts on the - network 192.168.2.0, send the - packets to the host 192.168.2.1. You will need to - run a similar command on the other gateway, but with the - 192.168.1.x addresses - instead. - - IP traffic from hosts on one network will now be able to - reach hosts on the other network. - - That has now created two thirds of a VPN between the two - networks, in as much as it is virtual and it is a - network. It is not private yet. You can test - this using &man.ping.8; and &man.tcpdump.1;. 
Log in to the - gateway host and run - - tcpdump dst host 192.168.2.1 - - In another log in session on the same host run - - ping 192.168.2.1 - - You will see output that looks something like this: - - -16:10:24.018080 192.168.1.1 > 192.168.2.1: icmp: echo request -16:10:24.018109 192.168.1.1 > 192.168.2.1: icmp: echo reply -16:10:25.018814 192.168.1.1 > 192.168.2.1: icmp: echo request -16:10:25.018847 192.168.1.1 > 192.168.2.1: icmp: echo reply -16:10:26.028896 192.168.1.1 > 192.168.2.1: icmp: echo request -16:10:26.029112 192.168.1.1 > 192.168.2.1: icmp: echo reply - - - As you can see, the ICMP messages are going back and forth - unencrypted. If you had used the parameter to - &man.tcpdump.1; to grab more bytes of data from the packets you - would see more information. - - Obviously this is unacceptable. The next section will - discuss securing the link between the two networks so that it - all traffic is automatically encrypted. - - - Summary: - - Configure both kernels with pseudo-device - gif. - - - Edit /etc/rc.conf on gateway host - #1 and add the following lines (replacing IP addresses as - necessary). - gifconfig_gif0="A.B.C.D W.X.Y.Z" -ifconfig_gif0="inet 192.168.1.1 192.168.2.1 netmask 0xffffffff" -static_routes="vpn" -route_vpn="192.168.2.0 192.168.2.1 netmask 0xffffff00" - - - - - Edit your firewall script - (/etc/rc.firewall, or similar) on both - hosts, and add - - ipfw add 1 allow ip from any to any via gif0 - - - Make similar changes to - /etc/rc.conf on gateway host #2, - reversing the order of IP addresses. - - - - - - Step 2: Securing the link - - To secure the link we will be using IPsec. IPsec provides - a mechanism for two hosts to agree on an encryption key, and to - then use this key in order to encrypt data between the two - hosts. - - The are two areas of configuration to be considered here. - - - - There must be a mechanism for two hosts to agree on the - encryption mechanism to use. Once two hosts have agreed on - this mechanism there is said to be a security association - between them. - - - There must be a mechanism for specifying which traffic - should be encrypted. Obviously, you do not want to encrypt - all your outgoing traffic -- you only want to encrypt the - traffic that is part of the VPN. The rules that you put in - place to determine what traffic will be encrypted are called - security policies. - - - - Security associations and security policies are both - maintained by the kernel, and can be modified by userland - programs. However, before you can do this you must configure the - kernel to support IPsec and the Encapsulated Security Payload - (ESP) protocol. This is done by configuring a kernel with: - - - kernel options - IPSEC - - - options IPSEC -options IPSEC_ESP - - - and recompiling, reinstalling, and rebooting. As before - you will need to do this to the kernels on both of the gateway - hosts. - - - IKE - - - You have two choices when it comes to setting up security - associations. You can configure them by hand between two hosts, - which entails choosing the encryption algorithm, encryption keys, - and so forth, or you can use daemons that implement the Internet - Key Exchange protocol (IKE) to do this for you. - - I recommend the latter. Apart from anything else, it is - easier to set up. - - - IPsec - security policies - - - - setkey - - - Editing and displaying security policies is carried out - using &man.setkey.8;. By analogy, setkey is - to the kernel's security policy tables as &man.route.8; is to - the kernel's routing tables. 
setkey can - also display the current security associations, and to continue - the analogy further, is akin to netstat -r - in that respect. - - There are a number of choices for daemons to manage - security associations with FreeBSD. This article will describe - how to use one of these, racoon — which is available from - security/ipsec-tools in the &os; Ports - collection. - - - racoon - - - The racoon software must be run on both gateway hosts. On each host it - is configured with the IP address of the other end of the VPN, - and a secret key (which you choose, and must be the same on both - gateways). - - The two daemons then contact one another, confirm that they - are who they say they are (by using the secret key that you - configured). The daemons then generate a new secret key, and use - this to encrypt the traffic over the VPN. They periodically - change this secret, so that even if an attacker were to crack one - of the keys (which is as theoretically close to unfeasible as it - gets) it will not do them much good -- by the time they have cracked - the key the two daemons have chosen another one. - - The configuration file for racoon is stored in - ${PREFIX}/etc/racoon. You should find a - configuration file there, which should not need to be changed - too much. The other component of racoon's configuration, - which you will need to change, is the pre-shared - key. - - The default racoon configuration expects to find this in - the file ${PREFIX}/etc/racoon/psk.txt. It is important to note - that the pre-shared key is not the key that will be used to - encrypt your traffic across the VPN link, it is simply a token - that allows the key management daemons to trust one another. - - psk.txt contains a line for each - remote site you are dealing with. In this example, where there - are two sites, each psk.txt file will contain one line (because - each end of the VPN is only dealing with one other end). - - On gateway host #1 this line should look like this: - - W.X.Y.Z secret - - That is, the public IP address of the remote end, - whitespace, and a text string that provides the secret. - Obviously, you should not use secret as your key -- the normal - rules for choosing a password apply. - - On gateway host #2 the line would look like this - - A.B.C.D secret - - That is, the public IP address of the remote end, and the - same secret key. psk.txt must be mode - 0600 (i.e., only read/write to - root) before racoon will run. - - You must run racoon on both gateway machines. You will - also need to add some firewall rules to allow the IKE traffic, - which is carried over UDP to the ISAKMP (Internet Security Association - Key Management Protocol) port. Again, this should be fairly early in - your firewall ruleset. - - ipfw add 1 allow udp from A.B.C.D to W.X.Y.Z isakmp -ipfw add 1 allow udp from W.X.Y.Z to A.B.C.D isakmp - - - Once racoon is running you can try pinging one gateway host - from the other. The connection is still not encrypted, but - racoon will then set up the security associations between the two - hosts -- this might take a moment, and you may see this as a - short delay before the ping commands start responding. - - Once the security association has been set up you can - view it using &man.setkey.8;. Run - - setkey -D - - on either host to view the security association information. - - That's one half of the problem. They other half is setting - your security policies. - - To create a sensible security policy, let's review what's - been set up so far. 
This discussions hold for both ends of the - link. - - Each IP packet that you send out has a header that contains - data about the packet. The header includes the IP addresses of - both the source and destination. As we already know, private IP - addresses, such as the 192.168.x.y - range are not supposed to appear on the public Internet. - Instead, they must first be encapsulated inside another packet. - This packet must have the public source and destination IP - addresses substituted for the private addresses. - - So if your outgoing packet started looking like this: - - - - - - - - - .----------------------. - | Src: 192.168.1.1 | - | Dst: 192.168.2.1 | - | <other header info> | - +----------------------+ - | <packet data> | - `----------------------' - - - - Then it will be encapsulated inside another packet, looking - something like this: - - - - - - - - - .--------------------------. - | Src: A.B.C.D | - | Dst: W.X.Y.Z | - | <other header info> | - +--------------------------+ - | .----------------------. | - | | Src: 192.168.1.1 | | - | | Dst: 192.168.2.1 | | - | | <other header info> | | - | +----------------------+ | - | | <packet data> | | - | `----------------------' | - `--------------------------' - - - - This encapsulation is carried out by the - gif device. As - you can see, the packet now has real IP addresses on the outside, - and our original packet has been wrapped up as data inside the - packet that will be put out on the Internet. - - Obviously, we want all traffic between the VPNs to be - encrypted. You might try putting this in to words, as: - - If a packet leaves from A.B.C.D, and it is destined for W.X.Y.Z, then encrypt it, using the - necessary security associations. - - If a packet arrives from W.X.Y.Z, and it is destined for A.B.C.D, then decrypt it, using the - necessary security associations. - - That's close, but not quite right. If you did this, all - traffic to and from W.X.Y.Z, even - traffic that was not part of the VPN, would be encrypted. That's - not quite what you want. The correct policy is as follows - - If a packet leaves from A.B.C.D, and that packet is encapsulating - another packet, and it is destined for W.X.Y.Z, then encrypt it, using the - necessary security associations. - - If a packet arrives from W.X.Y.Z, and that packet is encapsulating - another packet, and it is destined for A.B.C.D, then decrypt it, using the - necessary security associations. - - A subtle change, but a necessary one. - - Security policies are also set using &man.setkey.8;. - &man.setkey.8; features a configuration language for defining the - policy. You can either enter configuration instructions via - stdin, or you can use the option to specify a - filename that contains configuration instructions. - - The configuration on gateway host #1 (which has the public - IP address A.B.C.D) to force all - outbound traffic to W.X.Y.Z to be - encrypted is: - - -spdadd A.B.C.D/32 W.X.Y.Z/32 ipencap -P out ipsec esp/tunnel/A.B.C.D-W.X.Y.Z/require; - - - Put these commands in a file (e.g. - /etc/ipsec.conf) and then run - - &prompt.root; setkey -f /etc/ipsec.conf - - tells &man.setkey.8; that we want - to add a rule to the secure policy database. The rest of this - line specifies which packets will match this policy. A.B.C.D/32 and W.X.Y.Z/32 are the IP addresses and - netmasks that identify the network or hosts that this policy will - apply to. In this case, we want it to apply to traffic between - these two hosts. 
tells the kernel that - this policy should only apply to packets that encapsulate other - packets. says that this policy applies - to outgoing packets, and says that the - packet will be secured. - - The second line specifies how this packet will be - encrypted. is the protocol that will be - used, while indicates that the packet - will be further encapsulated in an IPsec packet. The repeated - use of A.B.C.D and W.X.Y.Z is used to select the security - association to use, and the final - mandates that packets must be encrypted if they match this - rule. - - This rule only matches outgoing packets. You will need a - similar rule to match incoming packets. - - spdadd W.X.Y.Z/32 A.B.C.D/32 ipencap -P in ipsec esp/tunnel/W.X.Y.Z-A.B.C.D/require; - - Note the instead of - in this case, and the necessary reversal of - the IP addresses. - - The other gateway host (which has the public IP address - W.X.Y.Z) will need similar rules. - - spdadd W.X.Y.Z/32 A.B.C.D/32 ipencap -P out ipsec esp/tunnel/W.X.Y.Z-A.B.C.D/require; -spdadd A.B.C.D/32 W.X.Y.Z/32 ipencap -P in ipsec esp/tunnel/A.B.C.D-W.X.Y.Z/require; - - Finally, you need to add firewall rules to allow ESP and - IPENCAP packets back and forth. These rules will need to be - added to both hosts. - - ipfw add 1 allow esp from A.B.C.D to W.X.Y.Z -ipfw add 1 allow esp from W.X.Y.Z to A.B.C.D -ipfw add 1 allow ipencap from A.B.C.D to W.X.Y.Z -ipfw add 1 allow ipencap from W.X.Y.Z to A.B.C.D - - - Because the rules are symmetric you can use the same rules - on each gateway host. - - Outgoing packets will now look something like this: - - - - - - - - - .------------------------------. --------------------------. - | Src: A.B.C.D | | - | Dst: W.X.Y.Z | | - | <other header info> | | Encrypted - +------------------------------+ | packet. - | .--------------------------. | -------------. | contents - | | Src: A.B.C.D | | | | are - | | Dst: W.X.Y.Z | | | | completely - | | <other header info> | | | |- secure - | +--------------------------+ | | Encap'd | from third - | | .----------------------. | | -. | packet | party - | | | Src: 192.168.1.1 | | | | Original |- with real | snooping - | | | Dst: 192.168.2.1 | | | | packet, | IP addr | - | | | <other header info> | | | |- private | | - | | +----------------------+ | | | IP addr | | - | | | <packet data> | | | | | | - | | `----------------------' | | -' | | - | `--------------------------' | -------------' | - `------------------------------' --------------------------' - - - - - When they are received by the far end of the VPN they will - first be decrypted (using the security associations that have - been negotiated by racoon). Then they will enter the - gif interface, which will unwrap - the second layer, until you are left with the innermost - packet, which can then travel in to the inner network. - - You can check the security using the same &man.ping.8; test from - earlier. First, log in to the - A.B.C.D gateway machine, and - run: - - tcpdump dst host 192.168.2.1 - - In another log in session on the same host run - - ping 192.168.2.1 - - This time you should see output like the following: - - XXX tcpdump output - - Now, as you can see, &man.tcpdump.1; shows the ESP packets. If - you try to examine them with the option you will see - (apparently) gibberish, because of the encryption. - - Congratulations. You have just set up a VPN between two - remote sites. + The rule numbers may need to be altered depending on the + current host configuration. 
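The setkey.conf shown earlier installs the policies on the corporate gateway. A mirror-image file is required on the home gateway; a sketch, using the addresses from the example scenario, looks like this:

flush;
spdflush;
# To the corporate network
spdadd 10.0.0.0/24 10.246.38.0/24 any -P out ipsec esp/tunnel/192.168.1.12-172.16.5.4/use;
spdadd 10.246.38.0/24 10.0.0.0/24 any -P in ipsec esp/tunnel/172.16.5.4-192.168.1.12/use;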
+ - - Summary - - Configure both kernels with: - - options IPSEC -options IPSEC_ESP - - - - Install security/ipsec-tools. Edit - ${PREFIX}/etc/racoon/psk.txt on both - gateway hosts, adding an entry for the remote host's IP - address and a secret key that they both know. Make sure - this file is mode 0600. - - - Add the following lines to - /etc/rc.conf on each host: - - ipsec_enable="YES" -ipsec_file="/etc/ipsec.conf" - - - - Create an /etc/ipsec.conf on each - host that contains the necessary spdadd lines. On gateway - host #1 this would be: - - -spdadd A.B.C.D/32 W.X.Y.Z/32 ipencap -P out ipsec - esp/tunnel/A.B.C.D-W.X.Y.Z/require; -spdadd W.X.Y.Z/32 A.B.C.D/32 ipencap -P in ipsec - esp/tunnel/W.X.Y.Z-A.B.C.D/require; - - - On gateway host #2 this would be: - - -spdadd W.X.Y.Z/32 A.B.C.D/32 ipencap -P out ipsec - esp/tunnel/W.X.Y.Z-A.B.C.D/require; -spdadd A.B.C.D/32 W.X.Y.Z/32 ipencap -P in ipsec - esp/tunnel/A.B.C.D-W.X.Y.Z/require; - - - - Add firewall rules to allow IKE, ESP, and IPENCAP - traffic to both hosts: - - -ipfw add 1 allow udp from A.B.C.D to W.X.Y.Z isakmp -ipfw add 1 allow udp from W.X.Y.Z to A.B.C.D isakmp -ipfw add 1 allow esp from A.B.C.D to W.X.Y.Z -ipfw add 1 allow esp from W.X.Y.Z to A.B.C.D -ipfw add 1 allow ipencap from A.B.C.D to W.X.Y.Z -ipfw add 1 allow ipencap from W.X.Y.Z to A.B.C.D - - - + For users of &man.pf.4; or &man.ipf.8;, the following + rules should do the trick: - The previous two steps should suffice to get the VPN up and - running. Machines on each network will be able to refer to one - another using IP addresses, and all traffic across the link will - be automatically and securely encrypted. - + pass in quick proto esp from any to any +pass in quick proto ah from any to any +pass in quick proto ipencap from any to any +pass in quick proto udp from any port = 500 to any port = 500 +pass in quick on gif0 from any to any +pass out quick proto esp from any to any +pass out quick proto ah from any to any +pass out quick proto ipencap from any to any +pass out quick proto udp from any port = 500 to any port = 500 +pass out quick on gif0 from any to any + + Finally, to allow the machine to start support for the + VPN during system initialization, add the + following lines to /etc/rc.conf: + + ipsec_enable="YES" +ipsec_program="/usr/local/sbin/setkey" +ipsec_file="/usr/local/etc/racoon/setkey.conf" # allows setting up spd policies on boot +racoon_enable="yes"
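As a final check once racoon is running on both gateways, the negotiated security associations and the installed policies can be inspected with &man.setkey.8;:

&prompt.root; setkey -D
&prompt.root; setkey -DP

The first command dumps the Security Association Database (SAD) and the second dumps the Security Policy Database (SPD). Empty SAD output after traffic has crossed the tunnel indicates that the phase 2 negotiation did not complete.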
- OpenSSH + + OpenSSH + - ChernLeeContributed by + ChernLeeContributed + by - OpenSSH security OpenSSH - OpenSSH is a set of network connectivity tools used to - access remote machines securely. It can be used as a direct - replacement for rlogin, - rsh, rcp, and - telnet. Additionally, TCP/IP - connections can be tunneled/forwarded securely through SSH. - OpenSSH encrypts all traffic to effectively eliminate eavesdropping, - connection hijacking, and other network-level attacks. - - OpenSSH is maintained by the OpenBSD project, and is based - upon SSH v1.2.12 with all the recent bug fixes and updates. It - is compatible with both SSH protocols 1 and 2. OpenSSH has been - in the base system since FreeBSD 4.0. - - - Advantages of Using OpenSSH - - Normally, when using &man.telnet.1; or &man.rlogin.1;, - data is sent over the network in an clear, un-encrypted form. - Network sniffers anywhere in between the client and server can - steal your user/password information or data transferred in - your session. OpenSSH offers a variety of authentication and - encryption methods to prevent this from happening. - - - - Enabling sshd - - OpenSSH - enabling - - - The sshd daemon is enabled by - default on &os; 4.X. In &os; 5.X and later enabling - sshd is an option presented during - a Standard install of &os;. To see if - sshd is enabled, check the - rc.conf file for: - sshd_enable="YES" - This will load &man.sshd.8;, the daemon program for OpenSSH, - the next time your system initializes. Alternatively, you can - simply run directly the sshd daemon by typing sshd on the command line. - - - - SSH Client - - OpenSSH - client - - - The &man.ssh.1; utility works similarly to - &man.rlogin.1;. - - &prompt.root; ssh user@example.com -Host key not found from the list of known hosts. + OpenSSH is a set of network + connectivity tools used to provide secure access to remote + machines. Additionally, TCP/IP connections + can be tunneled or forwarded securely through + SSH connections. + OpenSSH encrypts all traffic to + effectively eliminate eavesdropping, connection hijacking, and + other network-level attacks. + + OpenSSH is maintained by the + OpenBSD project and is installed by default in &os;. It is + compatible with both SSH version 1 and 2 + protocols. + + When data is sent over the network in an unencrypted form, + network sniffers anywhere in between the client and server can + steal user/password information or data transferred during the + session. OpenSSH offers a variety of + authentication and encryption methods to prevent this from + happening. More information about + OpenSSH is available from http://www.openssh.com/. + + This section provides an overview of the built-in client + utilities to securely access other systems and securely transfer + files from a &os; system. It then describes how to configure a + SSH server on a &os; system. More + information is available in the man pages mentioned in this + chapter. + + + Using the SSH Client Utilities + + + OpenSSH + client + + + To log into a SSH server, use + ssh and specify a username that exists on + that server and the IP address or hostname + of the server. If this is the first time a connection has + been made to the specified server, the user will be prompted + to first verify the server's fingerprint: + + &prompt.root; ssh user@example.com +The authenticity of host 'example.com (10.0.0.1)' can't be established. +ECDSA key fingerprint is 25:cc:73:b5:b3:96:75:3d:56:19:49:d2:5c:1f:91:3b. 
Are you sure you want to continue connecting (yes/no)? yes -Host 'example.com' added to the list of known hosts. -user@example.com's password: ******* - - The login will continue just as it would have if a session was - created using rlogin or - telnet. SSH utilizes a key fingerprint - system for verifying the authenticity of the server when the - client connects. The user is prompted to enter - yes only when - connecting for the first time. Future attempts to login are all - verified against the saved fingerprint key. The SSH client - will alert you if the saved fingerprint differs from the - received fingerprint on future login attempts. The fingerprints - are saved in ~/.ssh/known_hosts, or - ~/.ssh/known_hosts2 for SSH v2 - fingerprints. - - By default, recent versions of the - OpenSSH servers only accept SSH v2 - connections. The client will use version 2 if possible and - will fall back to version 1. The client can also be forced to - use one or the other by passing it the or - for version 1 or version 2, respectively. - The version 1 compatability is maintained in the client for - backwards compatability with older versions. - - - - Secure Copy - - OpenSSH - secure copy - - scp +Permanently added 'example.com' (ECDSA) to the list of known hosts. +Password for user@example.com: user_password - The &man.scp.1; command works similarly to - &man.rcp.1;; it copies a file to or from a remote machine, - except in a secure fashion. + SSH utilizes a key fingerprint system + to verify the authenticity of the server when the client + connects. When the user accepts the key's fingerprint by + typing yes when connecting for the first + time, a copy of the key is saved to + .ssh/known_hosts in the user's home + directory. Future attempts to login are verified against the + saved key and ssh will display an alert if + the server's key does not match the saved key. If this + occurs, the user should first verify why the key has changed + before continuing with the connection. + + By default, recent versions of + OpenSSH only accept + SSHv2 connections. By default, the client + will use version 2 if possible and will fall back to version 1 + if the server does not support version 2. To force + ssh to only use the specified protocol, + include or . + Additional options are described in &man.ssh.1;. + + + OpenSSH + secure copy + + + &man.scp.1; + + + Use &man.scp.1; to securely copy a file to or from a + remote machine. This example copies + COPYRIGHT on the remote system to a file + of the same name in the current directory of the local + system: - &prompt.root; scp user@example.com:/COPYRIGHT COPYRIGHT -user@example.com's password: ******* + &prompt.root; scp user@example.com:/COPYRIGHT COPYRIGHT +Password for user@example.com: ******* COPYRIGHT 100% |*****************************| 4735 00:00 &prompt.root; - Since the fingerprint was already saved for this host in the - previous example, it is verified when using &man.scp.1; - here. - - The arguments passed to &man.scp.1; are similar - to &man.cp.1;, with the file or files in the first - argument, and the destination in the second. Since the file is - fetched over the network, through SSH, one or more of the file - arguments takes on the form - . - - - - - Configuration - - OpenSSH - configuration - - The system-wide configuration files for both the - OpenSSH daemon and client reside - within the /etc/ssh directory. - - ssh_config configures the client - settings, while sshd_config configures the - daemon. 
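Relating to the host key check described earlier: if a server's key has legitimately changed, for example after the server was reinstalled, the stale entry can be removed from ~/.ssh/known_hosts before reconnecting. &man.ssh-keygen.1; provides a flag for this; the hostname is the one from the earlier example:

&prompt.user; ssh-keygen -R example.com

This deletes any saved keys for example.com so that the new fingerprint can be verified and stored on the next connection.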
- - Additionally, the - (/usr/sbin/sshd by default), and - rc.conf - options can provide more levels of configuration. - - - - ssh-keygen + Since the fingerprint was already verified for this host, + the server's key is automatically checked before prompting for + the user's password. + + The arguments passed to scp are similar + to cp. The file or files to copy is the + first argument and the destination to copy to is the second. + Since the file is fetched over the network, one or more of the + file arguments takes the form + . Be + aware when copying directories recursively that + scp uses , whereas + cp uses . + + To open an interactive session for copying files, use + sftp. Refer to &man.sftp.1; for a list of + available commands while in an sftp + session. + + + Key-based Authentication + + Instead of using passwords, a client can be configured + to connect to the remote machine using keys. To generate + DSA or RSA + authentication keys, use ssh-keygen. To + generate a public and private key pair, specify the type of + key and follow the prompts. It is recommended to protect + the keys with a memorable, but hard to guess + passphrase. - Instead of using passwords, &man.ssh-keygen.1; can - be used to generate DSA or RSA keys to authenticate a user: - - &prompt.user; ssh-keygen -t dsa + &prompt.user; ssh-keygen -t dsa Generating public/private dsa key pair. Enter file in which to save the key (/home/user/.ssh/id_dsa): Created directory '/home/user/.ssh'. -Enter passphrase (empty for no passphrase): -Enter same passphrase again: +Enter passphrase (empty for no passphrase): type some passphrase here which can contain spaces +Enter same passphrase again: type some passphrase here which can contain spaces Your identification has been saved in /home/user/.ssh/id_dsa. Your public key has been saved in /home/user/.ssh/id_dsa.pub. The key fingerprint is: -bb:48:db:f2:93:57:80:b6:aa:bc:f5:d5:ba:8f:79:17 user@host.example.com - +bb:48:db:f2:93:57:80:b6:aa:bc:f5:d5:ba:8f:79:17 user@host.example.com - &man.ssh-keygen.1; will create a public and private - key pair for use in authentication. The private key is stored in - ~/.ssh/id_dsa or - ~/.ssh/id_rsa, whereas the public key is - stored in ~/.ssh/id_dsa.pub or - ~/.ssh/id_rsa.pub, respectively for DSA and - RSA key types. The public key must be placed in - ~/.ssh/authorized_keys of the remote - machine in order for the setup to work. Similarly, RSA version - 1 public keys should be placed in - ~/.ssh/authorized_keys. - - This will allow connection to the remote machine based upon - SSH keys instead of passwords. - - If a passphrase is used in &man.ssh-keygen.1;, the user - will be prompted for a password each time in order to use the - private key. &man.ssh-agent.1; can alleviate the strain of - repeatedly entering long passphrases, and is explored in the - section below. - - The various options and files can be different - according to the OpenSSH version - you have on your system; to avoid problems you should consult - the &man.ssh-keygen.1; manual page. - + Depending upon the specified protocol, the private key + is stored in ~/.ssh/id_dsa (or + ~/.ssh/id_rsa), and the public key + is stored in ~/.ssh/id_dsa.pub (or + ~/.ssh/id_rsa.pub). The + public key must be first copied to + ~/.ssh/authorized_keys on the remote + machine in order for key-based authentication to + work. - - ssh-agent and ssh-add + + Many users believe that keys are secure by design and + will use a key without a passphrase. This is + dangerous behavior. 
An + administrator can verify that a key pair is protected by a + passphrase by viewing the private key manually. If the + private key file contains the word + ENCRYPTED, the key owner is using a + passphrase. In addition, to better secure end users, + from may be placed in the public key + file. For example, adding + from="192.168.10.5" in the front of + ssh-rsa or rsa-dsa + prefix will only allow that specific user to login from + that IP address. + - The &man.ssh-agent.1; and &man.ssh-add.1; utilities provide - methods for SSH keys to be loaded - into memory for use, without needing to type the passphrase - each time. - - The &man.ssh-agent.1; utility will handle the authentication - using the private key(s) that are loaded into it. - &man.ssh-agent.1; should be used to launch another application. - At the most basic level, it could spawn a shell or at a more - advanced level, a window manager. - - To use &man.ssh-agent.1; in a shell, first it will need to - be spawned with a shell as an argument. Secondly, the - identity needs to be added by running &man.ssh-add.1; and - providing it the passphrase for the private key. Once these - steps have been completed the user will be able to &man.ssh.1; - to any host that has the corresponding public key installed. - For example: + The various options and files can be different + according to the OpenSSH version. + To avoid problems, consult &man.ssh-keygen.1;. + + If a passphrase is used, the user will be prompted for + the passphrase each time a connection is made to the server. + To load SSH keys into memory, without + needing to type the passphrase each time, use + &man.ssh-agent.1; and &man.ssh-add.1;. + + Authentication is handled by + ssh-agent, using the private key(s) that + are loaded into it. Then, ssh-agent + should be used to launch another application such as a + shell or a window manager. + + To use ssh-agent in a shell, start it + with a shell as an argument. Next, add the identity by + running ssh-add and providing it the + passphrase for the private key. Once these steps have been + completed, the user will be able to ssh + to any host that has the corresponding public key installed. + For example: - &prompt.user; ssh-agent csh + &prompt.user; ssh-agent csh &prompt.user; ssh-add -Enter passphrase for /home/user/.ssh/id_dsa: -Identity added: /home/user/.ssh/id_dsa (/home/user/.ssh/id_dsa) +Enter passphrase for key '/usr/home/user/.ssh/id_dsa': type passphrase here +Identity added: /usr/home/user/.ssh/id_dsa (/usr/home/user/.ssh/id_dsa) &prompt.user; - To use &man.ssh-agent.1; in X11, a call to - &man.ssh-agent.1; will need to be placed in - ~/.xinitrc. This will provide the - &man.ssh-agent.1; services to all programs launched in X11. - An example ~/.xinitrc file might look - like this: - - exec ssh-agent startxfce4 - - This would launch &man.ssh-agent.1;, which would in turn - launch XFCE, every time X11 starts. - Then once that is done and X11 has been restarted so that the - changes can take effect, simply run &man.ssh-add.1; to load - all of your SSH keys. - + To use ssh-agent in + &xorg;, add an entry for it in + ~/.xinitrc. This provides the + ssh-agent services to all programs + launched in &xorg;. An example + ~/.xinitrc might look like this: + + exec ssh-agent startxfce4 + + This launches ssh-agent, which in + turn launches XFCE, every time + &xorg; starts. Once + &xorg; has been restarted so that + the changes can take effect, run ssh-add + to load all of the SSH keys. 
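As noted above, key-based authentication only works once the public key has been installed on the remote machine. A minimal sketch of doing this over an existing password login follows; the host name, user name, and DSA key file match the earlier examples, and the remote ~/.ssh directory is created if it does not already exist:

% cat ~/.ssh/id_dsa.pub | ssh user@example.com 'mkdir -p ~/.ssh && cat >> ~/.ssh/authorized_keys'
Password for user@example.com:
% ssh user@example.com
Enter passphrase for key '/usr/home/user/.ssh/id_dsa':

After this, the server no longer asks for the account password, only for the key's passphrase, and not even for that if the key has already been loaded with ssh-add.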
+ - - SSH Tunneling - - OpenSSH - tunneling - + + <acronym>SSH</acronym> Tunneling - OpenSSH has the ability to create a tunnel to encapsulate - another protocol in an encrypted session. + + OpenSSH + tunneling + - The following command tells &man.ssh.1; to create a tunnel - for telnet: + OpenSSH has the ability to + create a tunnel to encapsulate another protocol in an + encrypted session. + + The following command tells ssh to + create a tunnel for + telnet: - &prompt.user; ssh -2 -N -f -L 5023:localhost:23 user@foo.example.com + &prompt.user; ssh -2 -N -f -L 5023:localhost:23 user@foo.example.com &prompt.user; - The ssh command is used with the - following options: + This example uses the following options: + + + + + + + Forces ssh to use version 2 to + connect to the server. + + + + + + + + Indicates no command, or tunnel only. If omitted, + ssh initiates a normal + session. + + + + + - - - - - - Forces ssh to use version 2 of - the protocol. (Do not use if you are working with older - SSH servers) - - - - - - - - Indicates no command, or tunnel only. If omitted, - ssh would initiate a normal - session. - - - - - - - - Forces ssh to run in the - background. - - - - - - - - Indicates a local tunnel in - localport:remotehost:remoteport - fashion. - + + Forces ssh to run in the + background. + - - + + + + + Indicates a local tunnel in + localport:remotehost:remoteport + format. + + - - The remote SSH server. - - - - - - An SSH tunnel works by creating a listen socket on - localhost on the specified port. - It then forwards any connection received - on the local host/port via the SSH connection to the specified - remote host and port. - - In the example, port 5023 on - localhost is being forwarded to port - 23 on localhost - of the remote machine. Since 23 is telnet, - this would create a secure telnet session through an SSH tunnel. + + - This can be used to wrap any number of insecure TCP - protocols such as SMTP, POP3, FTP, etc. + + The login name to use on the specified remote + SSH server. + + + - - Using SSH to Create a Secure Tunnel for SMTP + An SSH tunnel works by creating a + listen socket on localhost on the + specified localport. It then forwards + any connections received on localport via + the SSH connection to the specified + remotehost:remoteport. In the example, + port 5023 on the client is forwarded to + port 23 on the remote machine. Since + port 23 is used by telnet, this + creates an encrypted telnet + session through an SSH tunnel. + + This method can be used to wrap any number of insecure + TCP protocols such as + SMTP, POP3, and + FTP, as seen in the following + examples. + + + Create a Secure Tunnel for + <acronym>SMTP</acronym> - &prompt.user; ssh -2 -N -f -L 5025:localhost:25 user@mailserver.example.com + &prompt.user; ssh -2 -N -f -L 5025:localhost:25 user@mailserver.example.com user@mailserver.example.com's password: ***** &prompt.user; telnet localhost 5025 Trying 127.0.0.1... @@ -4411,210 +2826,261 @@ Escape character is '^]'. 220 mailserver.example.com ESMTP - This can be used in conjunction with an - &man.ssh-keygen.1; and additional user accounts to create a - more seamless/hassle-free SSH tunneling environment. Keys - can be used in place of typing a password, and the tunnels - can be run as a separate user. - - - - Practical SSH Tunneling Examples - - - Secure Access of a POP3 Server + This can be used in conjunction with + ssh-keygen and additional user accounts + to create a more seamless SSH tunneling + environment. 
Keys can be used in place of typing a + password, and the tunnels can be run as a separate + user. + - At work, there is an SSH server that accepts - connections from the outside. On the same office network - resides a mail server running a POP3 server. The network, - or network path between your home and office may or may not - be completely trustable. Because of this, you need to check - your e-mail in a secure manner. The solution is to create - an SSH connection to your office's SSH server, and tunnel - through to the mail server. + + Secure Access of a <acronym>POP3</acronym> + Server + + In this example, there is an SSH + server that accepts connections from the outside. On the + same network resides a mail server running a + POP3 server. To check email in a + secure manner, create an SSH connection + to the SSH server and tunnel through to + the mail server: - &prompt.user; ssh -2 -N -f -L 2110:mail.example.com:110 user@ssh-server.example.com + &prompt.user; ssh -2 -N -f -L 2110:mail.example.com:110 user@ssh-server.example.com user@ssh-server.example.com's password: ****** - When the tunnel is up and running, you can point your - mail client to send POP3 requests to localhost - port 2110. A connection here will be forwarded securely across - the tunnel to mail.example.com. - - - - Bypassing a Draconian Firewall - - Some network administrators impose extremely draconian - firewall rules, filtering not only incoming connections, - but outgoing connections. You may be only given access - to contact remote machines on ports 22 and 80 for SSH - and web surfing. - - You may wish to access another (perhaps non-work - related) service, such as an Ogg Vorbis server to stream - music. If this Ogg Vorbis server is streaming on some other - port than 22 or 80, you will not be able to access it. - - The solution is to create an SSH connection to a machine - outside of your network's firewall, and use it to tunnel to - the Ogg Vorbis server. + Once the tunnel is up and running, point the email + client to send POP3 requests to + localhost on port 2110. This + connection will be forwarded securely across the tunnel to + mail.example.com. + + + + Bypassing a Firewall + + Some firewalls + filter both incoming and outgoing connections. For + example, a firewall might limit access from remote + machines to ports 22 and 80 to only allow + SSH and web surfing. This prevents + access to any other service which uses a port other than + 22 or 80. + + The solution is to create an SSH + connection to a machine outside of the network's firewall + and use it to tunnel to the desired service: - &prompt.user; ssh -2 -N -f -L 8888:music.example.com:8000 user@unfirewalled-system.example.org + &prompt.user; ssh -2 -N -f -L 8888:music.example.com:8000 user@unfirewalled-system.example.org user@unfirewalled-system.example.org's password: ******* - Your streaming client can now be pointed to - localhost port 8888, which will be - forwarded over to music.example.com port - 8000, successfully evading the firewall. - + In this example, a streaming Ogg Vorbis client can now + be pointed to localhost port + 8888, which will be forwarded over to + music.example.com on port 8000, + successfully bypassing the firewall. + - The <varname>AllowUsers</varname> Users Option + Enabling the SSH Server + + + OpenSSH + enabling + + + In addition to providing built-in SSH + client utilities, a &os; system can be configured as an + SSH server, accepting connections from + other SSH clients. 
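Before turning to the server side, note that a client-side tunnel such as the ones created above can be checked from another terminal. This is a hedged sketch using &man.sockstat.1;; the port number 5025 is the one from the SMTP example, and any of the local ports used earlier could be substituted:

% sockstat -4 -l | grep 5025

A line showing ssh listening on 127.0.0.1:5025 confirms that the listen socket for the tunnel exists; if nothing is printed, the ssh command that creates the tunnel is not running.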
+ + To see if sshd is enabled, + check /etc/rc.conf for this line and add + it if it is missing: + + sshd_enable="YES" + + This will start sshd, the + daemon program for OpenSSH, the + next time the system boots. To start it now: - It is often a good idea to limit which users can log in and - from where. The AllowUsers option is a good - way to accomplish this. For example, to only allow the - root user to log in from - 192.168.1.32, something like this - would be appropriate in the - /etc/ssh/sshd_config file: + &prompt.root; service sshd start + + The first time sshd starts on a + &os; system, the system's host keys will be automatically + created and the fingerprint will be displayed on the console. + Provide users with the fingerprint so that they can verify it + the first time they connect to the server. + + Refer to &man.sshd.8; for the list of available options + when starting sshd and a more + complete discussion about authentication, the login process, + and the various configuration files. + + It is a good idea to limit which users can log into the + SSH server and from where using the + AllowUsers keyword in the + OpenSSH server configuration file. + For example, to only allow root to log in from + 192.168.1.32, add + this line to /etc/ssh/sshd_config: AllowUsers root@192.168.1.32 - To allow the user admin to log in from - anywhere, just list the username by itself: + To allow admin + to log in from anywhere, list that user without specifying an + IP address: AllowUsers admin - Multiple users should be listed on the same line, like so: + Multiple users should be listed on the same line, like + so: AllowUsers root@192.168.1.32 admin - - It is important that you list each user that needs to - log in to this machine; otherwise they will be locked out. - - After making changes to - /etc/ssh/sshd_config you must tell - &man.sshd.8; to reload its config files, by running: + /etc/ssh/sshd_config, + tell sshd to reload its + configuration file by running: - &prompt.root; /etc/rc.d/sshd reload - + &prompt.root; service sshd reload - - Further Reading - OpenSSH - &man.ssh.1; &man.scp.1; &man.ssh-keygen.1; - &man.ssh-agent.1; &man.ssh-add.1; &man.ssh.config.5; - &man.sshd.8; &man.sftp-server.8; &man.sshd.config.5; + + When this keyword is used, it is important to list each + user that needs to log into this machine. Any user that is + not specified in that line will be locked out. Also, the + keywords used in the OpenSSH + server configuration file are case-sensitive. If the + keyword is not spelled correctly, including its case, it + will be ignored. Always test changes to this file to make + sure that the edits are working as expected. Refer to + &man.sshd.config.5; to verify the spelling and use of the + available keywords. + + + + Do not confuse /etc/ssh/sshd_config + with /etc/ssh/ssh_config (note the + extra d in the first filename). The + first file configures the server and the second file + configures the client. Refer to &man.ssh.config.5; for a + listing of the available client settings,. + - File System Access Control Lists + + Access Control Lists + - TomRhodesContributed by + TomRhodesContributed + by - - ACL - In conjunction with file system enhancements like snapshots, FreeBSD 5.0 - and later offers the security of File System Access Control Lists - (ACLs). - - Access Control Lists extend the standard &unix; - permission model in a highly compatible (&posix;.1e) way. 
This feature - permits an administrator to make use of and take advantage of a - more sophisticated security model. - - To enable ACL support for UFS - file systems, the following: + Access Control Lists (ACLs) extend the + standard &unix; permission model in a &posix;.1e compatible way. + This permits an administrator to take advantage of a more + fine-grained permissions model. + + The &os; GENERIC kernel provides + ACL support for UFS file + systems. Users who prefer to compile a custom kernel must + include the following option in their custom kernel + configuration file: options UFS_ACL - must be compiled into the kernel. If this option has - not been compiled in, a warning message will be displayed - when attempting to mount a file system supporting ACLs. - This option is included in the GENERIC kernel. - ACLs rely on extended attributes being enabled on - the file system. Extended attributes are natively supported in the next generation - &unix; file system, UFS2. - - A higher level of administrative overhead is required to - configure extended attributes on UFS1 than on - UFS2. The performance of extended attributes - on UFS2 is also substantially higher. As a - result, UFS2 is generally recommended in preference - to UFS1 for use with access control lists. - - ACLs are enabled by the mount-time administrative - flag, , which may be added to /etc/fstab. - The mount-time flag can also be automatically set in a persistent manner using - &man.tunefs.8; to modify a superblock ACLs flag in the - file system header. In general, it is preferred to use the superblock flag - for several reasons: + If this option is not compiled in, a warning message will be + displayed when attempting to mount a file system with + ACL support. ACLs rely on + extended attributes which are natively supported in + UFS2. + + This chapter describes how to enable + ACL support and provides some usage + examples. + + + Enabling <acronym>ACL</acronym> Support + + ACLs are enabled by the mount-time + administrative flag, , which may be added + to /etc/fstab. The mount-time flag can + also be automatically set in a persistent manner using + &man.tunefs.8; to modify a superblock ACLs + flag in the file system header. In general, it is preferred + to use the superblock flag for several reasons: - - - The mount-time ACLs flag cannot be changed by a - remount (&man.mount.8; ), only by means of a complete - &man.umount.8; and fresh &man.mount.8;. This means that - ACLs cannot be enabled on the root file system after boot. - It also means that you cannot change the disposition of a file system once - it is in use. - + + + The superblock flag cannot be changed by a remount + using as it requires a complete + umount and fresh + mount. This means that + ACLs cannot be enabled on the root file + system after boot. It also means that + ACL support on a file system cannot be + changed while the system is in use. + - - Setting the superblock flag will cause the file system to always be - mounted with ACLs enabled even if there is not an - fstab entry or if the devices re-order. This prevents - accidental mounting of the file system without ACLs - enabled, which can result in ACLs being improperly enforced, - and hence security problems. - - + + Setting the superblock flag causes the file system to + always be mounted with ACLs enabled, + even if there is not an fstab entry + or if the devices re-order. This prevents accidental + mounting of the file system without ACL + support. 
+ + - We may change the ACLs behavior to allow the flag to - be enabled without a complete fresh &man.mount.8;, but we consider it desirable to - discourage accidental mounting without ACLs enabled, because you - can shoot your feet quite nastily if you enable ACLs, then disable - them, then re-enable them without flushing the extended attributes. In general, once - you have enabled ACLs on a file system, they should not be disabled, - as the resulting file protections may not be compatible with those intended by the - users of the system, and re-enabling ACLs may re-attach the previous - ACLs to files that have since had their permissions changed, - resulting in other unpredictable behavior. + + It is desirable to discourage accidental mounting + without ACLs enabled because nasty things + can happen if ACLs are enabled, then + disabled, then re-enabled without flushing the extended + attributes. In general, once ACLs are + enabled on a file system, they should not be disabled, as + the resulting file protections may not be compatible with + those intended by the users of the system, and re-enabling + ACLs may re-attach the previous + ACLs to files that have since had their + permissions changed, resulting in unpredictable + behavior. + - File systems with ACLs enabled will show a + - (plus) sign in their permission settings when viewed. For example: + File systems with ACLs enabled will + show a plus (+) sign in their permission + settings: - drwx------ 2 robert robert 512 Dec 27 11:54 private + drwx------ 2 robert robert 512 Dec 27 11:54 private drwxrwx---+ 2 robert robert 512 Dec 23 10:57 directory1 drwxrwx---+ 2 robert robert 512 Dec 22 10:20 directory2 drwxrwx---+ 2 robert robert 512 Dec 27 11:57 directory3 drwxr-xr-x 2 robert robert 512 Nov 10 11:54 public_html - Here we see that the directory1, - directory2, and directory3 - directories are all taking advantage of ACLs. The - public_html directory is not. + In this example, directory1, + directory2, and + directory3 are all taking advantage of + ACLs, whereas + public_html is not. + - Making Use of <acronym>ACL</acronym>s + Using <acronym>ACL</acronym>s - The file system ACLs can be viewed by the - &man.getfacl.1; utility. For instance, to view the - ACL settings on the test - file, one would use the command: + File system ACLs can be viewed using + getfacl. For instance, to view the + ACL settings on + test: &prompt.user; getfacl test #file:test @@ -4624,95 +3090,101 @@ group::r-- other::r-- - To change the ACL settings on this file, - invoke the &man.setfacl.1; utility. Observe: + To change the ACL settings on this + file, use setfacl. To remove all of the + currently defined ACLs from a file or file + system, include . However, the preferred + method is to use as it leaves the basic + fields required for ACLs to work. &prompt.user; setfacl -k test - The flag will remove all of the - currently defined ACLs from a file or file - system. The more preferable method would be to use - as it leaves the basic fields required for - ACLs to work. + To modify the default ACL entries, use + : &prompt.user; setfacl -m u:trhodes:rwx,group:web:r--,o::--- test - In the aforementioned command, the - option was used to modify the default ACL - entries. Since there were no pre-defined entries, as they were - removed by the previous command, this will restore the default - options and assign the options listed. 
Take care to notice that - if you add a user or group which does not exist on the system, - an Invalid argument error will be printed - to stdout. + In this example, there were no pre-defined entries, as + they were removed by the previous command. This command + restores the default options and assigns the options listed. + If a user or group is added which does not exist on the + system, an Invalid argument error will + be displayed. + + Refer to &man.getfacl.1; and &man.setfacl.1; for more + information about the options available for these + commands. - Monitoring Third Party Security Issues + + Monitoring Third Party Security Issues + - TomRhodesContributed by + TomRhodesContributed + by - - - Portaudit + portaudit - In recent years, the security world has made many improvements - to how vulnerability assessment is handled. The threat of system - intrusion increases as third party utilities are installed and - configured for virtually any operating system available - today. + In recent years, the security world has made many + improvements to how vulnerability assessment is handled. The + threat of system intrusion increases as third party utilities + are installed and configured for virtually any operating + system available today. - Vulnerability assessment is a key factor in security, and - while &os; releases advisories for the base system, doing so + Vulnerability assessment is a key factor in security. + While &os; releases advisories for the base system, doing so for every third party utility is beyond the &os; Project's capability. There is a way to mitigate third party vulnerabilities and warn administrators of known security issues. A &os; add on utility known as - Portaudit exists solely for this + portaudit exists solely for this purpose. - The security/portaudit port - polls a database, updated and maintained by the &os; Security - Team and ports developers, for known security issues. + The + ports-mgmt/portaudit + port polls a database, which is updated and maintained by the + &os; Security Team and ports developers, for known security + issues. - To begin using Portaudit, one - must install it from the Ports Collection: + To install portaudit from the + Ports Collection: - &prompt.root; cd /usr/ports/security/portaudit && make install clean + &prompt.root; cd /usr/ports/ports-mgmt/portaudit && make install clean - During the install process, the configuration files for + During the installation, the configuration files for &man.periodic.8; will be updated, permitting - Portaudit output in the daily security - runs. Ensure the daily security run emails, which are sent to - root's email account, are being read. No - more configuration will be required here. - - After installation, an administrator can update the database - and view known vulnerabilities in installed packages by invoking - the following command: + portaudit output in the daily + security runs. Ensure that the daily security run emails, which + are sent to root's + email account, are being read. No other configuration is + required. + + After installation, an administrator can update the + database and view known vulnerabilities in installed packages + by invoking the following command: &prompt.root; portaudit -Fda - The database will automatically be updated during the - &man.periodic.8; run; thus, the previous command is completely - optional. It is only required for the following - examples. + The database is automatically updated during the + &man.periodic.8; run. 
The above command is optional and can + be used to manually update the database now. To audit the third party utilities installed as part of - the Ports Collection at anytime, an administrator need only run - the following command: + the Ports Collection at anytime, an administrator can run the + following command: &prompt.root; portaudit -a - Portaudit will produce something - like this for vulnerable packages: + portaudit will display messages + for any installed vulnerable packages: Affected package: cups-base-1.1.22.0_1 Type of problem: cups-base -- HPGL buffer overflow vulnerability. @@ -4722,271 +3194,760 @@ You are advised to update or deinstall the affected package(s) immediately. - By pointing a web browser to the URL shown, - an administrator may obtain more information about the - vulnerability in question. This will include versions affected, - by &os; Port version, along with other web sites which may contain - security advisories. - - In short, Portaudit is a powerful - utility and extremely useful when coupled with the - Portupgrade port. + By pointing a web browser to the displayed + URL, an administrator may obtain more + information about the vulnerability. This will include the + versions affected, by &os; port version, along with other web + sites which may contain security advisories. + + portaudit is a powerful utility + and is extremely useful when coupled with the + portmaster port. - &os; Security Advisories + + &os; Security Advisories + - TomRhodesContributed by + TomRhodesContributed + by - - - FreeBSD Security Advisories + &os; Security Advisories - Like many production quality operating systems, &os; publishes - Security Advisories. These advisories are usually - mailed to the security lists and noted in the Errata only - after the appropriate releases have been patched. This section - will work to explain what an advisory is, how to understand it, - and what measures to take in order to patch a system. + Like many producers of quality operating systems, the &os; + Project has a security team which is responsible for + determining the End-of-Life (EoL) date for + each &os; release and to provide security updates for supported + releases which have not yet reached their + EoL. More information about the &os; + security team and the supported releases is available on the + &os; security + page. + + One task of the security team is to respond to reported + security vulnerabilities in the &os; operating system. Once a + vulnerability is confirmed, the security team verifies the steps + necessary to fix the vulnerability and updates the source code + with the fix. It then publishes the details as a + Security Advisory. Security + advisories are published on the &os; + website and mailed to the + &a.security-notifications.name;, &a.security.name;, and + &a.announce.name; mailing lists. + + This section describes the format of a &os; security + advisory. - What does an advisory look like? + Format of a Security Advisory - The &os; security advisories look similar to the one below, - taken from the &a.security-notifications.name; mailing list. 
+ Here is an example of a &os; security advisory: ============================================================================= -&os;-SA-XX:XX.UTIL Security Advisory - The &os; Project +-----BEGIN PGP SIGNED MESSAGE----- +Hash: SHA512 -Topic: denial of service due to some problem - -Category: core -Module: sys -Announced: 2003-09-23 -Credits: Person@EMAIL-ADDRESS -Affects: All releases of &os; - &os; 4-STABLE prior to the correction date -Corrected: 2003-09-23 16:42:59 UTC (RELENG_4, 4.9-PRERELEASE) - 2003-09-23 20:08:42 UTC (RELENG_5_1, 5.1-RELEASE-p6) - 2003-09-23 20:07:06 UTC (RELENG_5_0, 5.0-RELEASE-p15) - 2003-09-23 16:44:58 UTC (RELENG_4_8, 4.8-RELEASE-p8) - 2003-09-23 16:47:34 UTC (RELENG_4_7, 4.7-RELEASE-p18) - 2003-09-23 16:49:46 UTC (RELENG_4_6, 4.6-RELEASE-p21) - 2003-09-23 16:51:24 UTC (RELENG_4_5, 4.5-RELEASE-p33) - 2003-09-23 16:52:45 UTC (RELENG_4_4, 4.4-RELEASE-p43) - 2003-09-23 16:54:39 UTC (RELENG_4_3, 4.3-RELEASE-p39) -&os; only: NO +============================================================================= +FreeBSD-SA-14:04.bind Security Advisory + The FreeBSD Project + +Topic: BIND remote denial of service vulnerability + +Category: contrib +Module: bind +Announced: 2014-01-14 +Credits: ISC +Affects: FreeBSD 8.x and FreeBSD 9.x +Corrected: 2014-01-14 19:38:37 UTC (stable/9, 9.2-STABLE) + 2014-01-14 19:42:28 UTC (releng/9.2, 9.2-RELEASE-p3) + 2014-01-14 19:42:28 UTC (releng/9.1, 9.1-RELEASE-p10) + 2014-01-14 19:38:37 UTC (stable/8, 8.4-STABLE) + 2014-01-14 19:42:28 UTC (releng/8.4, 8.4-RELEASE-p7) + 2014-01-14 19:42:28 UTC (releng/8.3, 8.3-RELEASE-p14) +CVE Name: CVE-2014-0591 For general information regarding FreeBSD Security Advisories, including descriptions of the fields above, security branches, and the -following sections, please visit -http://www.FreeBSD.org/security/. +following sections, please visit <URL:http://security.FreeBSD.org/>. + +I. Background + +BIND 9 is an implementation of the Domain Name System (DNS) protocols. +The named(8) daemon is an Internet Domain Name Server. + +II. Problem Description + +Because of a defect in handling queries for NSEC3-signed zones, BIND can +crash with an "INSIST" failure in name.c when processing queries possessing +certain properties. This issue only affects authoritative nameservers with +at least one NSEC3-signed zone. Recursive-only servers are not at risk. + +III. Impact + +An attacker who can send a specially crafted query could cause named(8) +to crash, resulting in a denial of service. + +IV. Workaround + +No workaround is available, but systems not running authoritative DNS service +with at least one NSEC3-signed zone using named(8) are not vulnerable. + +V. Solution + +Perform one of the following: + +1) Upgrade your vulnerable system to a supported FreeBSD stable or +release / security branch (releng) dated after the correction date. -I. Background +2) To update your vulnerable system via a source code patch: +The following patches have been verified to apply to the applicable +FreeBSD release branches. -II. Problem Description +a) Download the relevant patch from the location below, and verify the +detached PGP signature using your PGP utility. +[FreeBSD 8.3, 8.4, 9.1, 9.2-RELEASE and 8.4-STABLE] +# fetch http://security.FreeBSD.org/patches/SA-14:04/bind-release.patch +# fetch http://security.FreeBSD.org/patches/SA-14:04/bind-release.patch.asc +# gpg --verify bind-release.patch.asc -III. 
Impact +[FreeBSD 9.2-STABLE] +# fetch http://security.FreeBSD.org/patches/SA-14:04/bind-stable-9.patch +# fetch http://security.FreeBSD.org/patches/SA-14:04/bind-stable-9.patch.asc +# gpg --verify bind-stable-9.patch.asc +b) Execute the following commands as root: -IV. Workaround +# cd /usr/src +# patch < /path/to/patch +Recompile the operating system using buildworld and installworld as +described in <URL:http://www.FreeBSD.org/handbook/makeworld.html>. -V. Solution +Restart the applicable daemons, or reboot the system. +3) To update your vulnerable system via a binary patch: -VI. Correction details +Systems running a RELEASE version of FreeBSD on the i386 or amd64 +platforms can be updated via the freebsd-update(8) utility: +# freebsd-update fetch +# freebsd-update install -VII. References +VI. Correction details + +The following list contains the correction revision numbers for each +affected branch. + +Branch/path Revision +- ------------------------------------------------------------------------- +stable/8/ r260646 +releng/8.3/ r260647 +releng/8.4/ r260647 +stable/9/ r260646 +releng/9.1/ r260647 +releng/9.2/ r260647 +- ------------------------------------------------------------------------- + +To see which files were modified by a particular revision, run the +following command, replacing NNNNNN with the revision number, on a +machine with Subversion installed: + +# svn diff -cNNNNNN --summarize svn://svn.freebsd.org/base + +Or visit the following URL, replacing NNNNNN with the revision number: + +<URL:http://svnweb.freebsd.org/base?view=revision&revision=NNNNNN> + +VII. References + +<URL:https://kb.isc.org/article/AA-01078> + +<URL:http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-0591> + +The latest revision of this advisory is available at +<URL:http://security.FreeBSD.org/advisories/FreeBSD-SA-14:04.bind.asc> +-----BEGIN PGP SIGNATURE----- + +iQIcBAEBCgAGBQJS1ZTYAAoJEO1n7NZdz2rnOvQP/2/68/s9Cu35PmqNtSZVVxVG +ZSQP5EGWx/lramNf9566iKxOrLRMq/h3XWcC4goVd+gZFrvITJSVOWSa7ntDQ7TO +XcinfRZ/iyiJbs/Rg2wLHc/t5oVSyeouyccqODYFbOwOlk35JjOTMUG1YcX+Zasg +ax8RV+7Zt1QSBkMlOz/myBLXUjlTZ3Xg2FXVsfFQW5/g2CjuHpRSFx1bVNX6ysoG +9DT58EQcYxIS8WfkHRbbXKh9I1nSfZ7/Hky/kTafRdRMrjAgbqFgHkYTYsBZeav5 +fYWKGQRJulYfeZQ90yMTvlpF42DjCC3uJYamJnwDIu8OhS1WRBI8fQfr9DRzmRua +OK3BK9hUiScDZOJB6OqeVzUTfe7MAA4/UwrDtTYQ+PqAenv1PK8DZqwXyxA9ThHb +zKO3OwuKOVHJnKvpOcr+eNwo7jbnHlis0oBksj/mrq2P9m2ueF9gzCiq5Ri5Syag +Wssb1HUoMGwqU0roS8+pRpNC8YgsWpsttvUWSZ8u6Vj/FLeHpiV3mYXPVMaKRhVm +067BA2uj4Th1JKtGleox+Em0R7OFbCc/9aWC67wiqI6KRyit9pYiF3npph+7D5Eq +7zPsUdDd+qc+UTiLp3liCRp5w6484wWdhZO6wRtmUgxGjNkxFoNnX8CitzF8AaqO +UWWemqWuz3lAZuORQ9KX +=OQzQ +-----END PGP SIGNATURE----- + + Every security advisory uses the following format: + + + + Each security advisory is signed by the + PGP key of the Security Officer. The + public key for the Security Officer can be verified at + . + + + The name of the security advisory always begins with + FreeBSD-SA- (for FreeBSD Security + Advisory), followed by the year in two digit format + (14:), followed by the advisory number + for that year (04.), followed by the + name of the affected application or subsystem + (bind). The advisory shown here is the + fourth advisory for 2014 and it affects + BIND. + - - - The Topic field indicates exactly what the problem is. - It is basically an introduction to the current security - advisory and notes the utility with the + + The Topic field summarizes the vulnerability. 
- + - - The Category refers to the affected part of the system - which may be one of core, contrib, or ports. The core + + The Category refers to the + affected part of the system which may be one of + core, contrib, or + ports. The core category means that the vulnerability affects a core - component of the &os; operating system. The contrib - category means that the vulnerability affects software - contributed to the &os; Project, such as - sendmail. Finally the ports - category indicates that the vulnerability affects add on - software available as part of the Ports Collection. - - - - The Module field refers to the component location, for - instance sys. In this example, we see that the module, - sys, is affected; therefore, this vulnerability - affects a component used within the kernel. - - - - The Announced field reflects the date said security - advisory was published, or announced to the world. This - means that the security team has verified that the problem - does exist and that a patch has been committed to the &os; + component of the &os; operating system. The + contrib category means that the + vulnerability affects software included with &os;, + such as BIND. The + ports category indicates that the + vulnerability affects software available through the Ports + Collection. + + + + The Module field refers to the + component location. In this example, the + bind module is affected; therefore, + this vulnerability affects an application installed with + the operating system. + + + + The Announced field reflects the + date the security advisory was published. This means + that the security team has verified that the problem + exists and that a patch has been committed to the &os; source code repository. - + + + + The Credits field gives credit to + the individual or organization who noticed the + vulnerability and reported it. + + + + The Affects field explains which + releases of &os; are affected by this + vulnerability. + + + + The Corrected field indicates the + date, time, time offset, and releases that were + corrected. The section in parentheses shows each branch + for which the fix has been merged, and the version number + of the corresponding release from that branch. The + release identifier itself includes the version number + and, if appropriate, the patch level. The patch level is + the letter p followed by a number, + indicating the sequence number of the patch, allowing + users to track which patches have already been applied to + the system. + + + + The CVE Name field lists the + advisory number, if one exists, in the public cve.mitre.org + security vulnerabilities database. + + + + The Background field provides a + description of the affected module. + + + + The Problem Description field + explains the vulnerability. This can include + information about the flawed code and how the utility + could be maliciously used. + + + + The Impact field describes what + type of impact the problem could have on a system. + + + + The Workaround field indicates if + a workaround is available to system administrators who + cannot immediately patch the system . + + + + The Solution field provides the + instructions for patching the affected system. This is a + step by step tested and verified method for getting a + system patched and working securely. + - - The Credits field gives credit to the individual or - organization who noticed the vulnerability and reported - it. - - - - The Affects field explains which releases of &os; are - affected by this vulnerability. 
For the kernel, a quick - look over the output from ident on the - affected files will help in determining the revision. - For ports, the version number is listed after the port name - in /var/db/pkg. If the system does not - sync with the &os; CVS repository and rebuild - daily, chances are that it is affected. - - - - The Corrected field indicates the date, time, time - offset, and release that was corrected. - - - - The &os; only field indicates whether this vulnerability - affects just &os;, or if it affects other operating systems - as well. - - - - The Background field gives information on exactly what - the affected utility is. Most of the time this is why - the utility exists in &os;, what it is used for, and a bit - of information on how the utility came to be. - - - - The Problem Description field explains the security hole - in depth. This can include information on flawed code, or - even how the utility could be maliciously used to open - a security hole. - - - - The Impact field describes what type of impact the - problem could have on a system. For example, this could - be anything from a denial of service attack, to extra - privileges available to users, or even giving the attacker - superuser access. - - - - The Workaround field offers a feasible workaround to - system administrators who may be incapable of upgrading - the system. This may be due to time constraints, network - availability, or a slew of other reasons. Regardless, - security should not be taken lightly, and an affected system - should either be patched or the security hole workaround - should be implemented. - - - - The Solution field offers instructions on patching the - affected system. This is a step by step tested and verified - method for getting a system patched and working - securely. - - - - The Correction Details field displays the - CVS branch or release name with the - periods changed to underscore characters. It also shows - the revision number of the affected files within each - branch. - - - - The References field usually offers sources of other - information. This can included web URLs, - books, mailing lists, and newsgroups. - - + + The Correction Details field + displays each affected Subversion branch with the revision + number that contains the corrected code. + + + + The References field offers sources + of additional information regarding the + vulnerability. + + - Process Accounting + + Process Accounting + - TomRhodesContributed by + TomRhodesContributed + by - - Process Accounting Process accounting is a security method in which an - administrator may keep track of system resources used, + administrator may keep track of system resources used and their allocation among users, provide for system monitoring, and minimally track a user's commands. - This indeed has its own positive and negative points. One of - the positives is that an intrusion may be narrowed down + Process accounting has both positive and negative points. + One of the positives is that an intrusion may be narrowed down to the point of entry. A negative is the amount of logs generated by process accounting, and the disk space they may - require. This section will walk an administrator through - the basics of process accounting. + require. This section walks an administrator through the basics + of process accounting. + + + If more fine-grained accounting is needed, refer to + . + - Enable and Utilizing Process Accounting - Before making use of process accounting, it - must be enabled. 
To do this, execute the following - commands: + Enabling and Utilizing Process Accounting - &prompt.root; touch /var/account/acct + Before using process accounting, it must be enabled using + the following commands: + &prompt.root; touch /var/account/acct +&prompt.root; chmod 600 /var/account/acct &prompt.root; accton /var/account/acct - &prompt.root; echo 'accounting_enable="YES"' >> /etc/rc.conf - Once enabled, accounting will begin to track - CPU stats, commands, etc. All accounting - logs are in a non-human readable format and may be viewed - using the &man.sa.8; utility. If issued without any options, - sa will print information relating to the - number of per user calls, the total elapsed time in minutes, - total CPU and user time in minutes, average - number of I/O operations, etc. - - To view information about commands being issued, one - would use the &man.lastcomm.1; utility. The - lastcomm may be used to print out commands - issued by users on specific &man.ttys.5;, for example: - - &prompt.root; lastcomm ls - trhodes ttyp1 - - Would print out all known usage of the ls - by trhodes on the ttyp1 terminal. - - Many other useful options exist and are explained in the - &man.lastcomm.1;, &man.acct.5; and &man.sa.8; manual - pages. + Once enabled, accounting will begin to track information + such as CPU statistics and executed + commands. All accounting logs are in a non-human readable + format which can be viewed using sa. If + issued without any options, sa prints + information relating to the number of per-user calls, the + total elapsed time in minutes, total CPU + and user time in minutes, and the average number of + I/O operations. Refer to &man.sa.8; for + the list of available options which control the output. + + To display the commands issued by users, use + lastcomm. For example, this command + prints out all usage of ls by trhodes on the + ttyp1 terminal: + + &prompt.root; lastcomm ls trhodes ttyp1 + + Many other useful options exist and are explained in + &man.lastcomm.1;, &man.acct.5;, and &man.sa.8;. + + + + + + Resource Limits + + + TomRhodesContributed + by + + + + + Resource limits + + + &os; provides several methods for an administrator to + limit the amount of system resources an individual may use. + Disk quotas limit the amount of disk space available to users. + Quotas are discussed in . + + + quotas + + + limiting users + quotas + + + disk quotas + + + Limits to other resources, such as CPU + and memory, can be set using either a flat file or a command to + configure a resource limits database. The traditional method + defines login classes by editing + /etc/login.conf. While this method is + still supported, any changes require a multi-step process of + editing this file, rebuilding the resource database, making + necessary changes to /etc/master.passwd, + and rebuilding the password database. This can become time + consuming, depending upon the number of users to + configure. + + Beginning with &os; 9.0-RELEASE, + rctl can be used to provide a more + fine-grained method for controlling resource limits. This + command supports more than user limits as it can also be used to + set resource constraints on processes and jails. + + This section demonstrates both methods for controlling + resources, beginning with the traditional method. 
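Before looking at the two configuration methods, it can be helpful to see the limits currently in effect for a session. This is a hedged sketch using the ulimit built-in of &man.sh.1;; users of &man.csh.1; have an equivalent limit built-in, and the values printed will differ from system to system:

% ulimit -a
% ulimit -H -a

The first command prints the soft limits for the current shell, while -H selects the corresponding hard limits; the distinction between the two is described below.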
+ + + Configuring Login Classes + + + limiting users + + + accounts + limiting + + + /etc/login.conf + + + In the traditional method, login classes and the resource + limits to apply to a login class are defined in + /etc/login.conf. Each user account can + be assigned to a login class, where default + is the default login class. Each login class has a set of + login capabilities associated with it. A login capability is + a + name=value + pair, where name is a well-known + identifier and value is an + arbitrary string which is processed accordingly depending on + the name. + + + Whenever /etc/login.conf is edited, + the /etc/login.conf.db must be updated + by executing the following command: + + &prompt.root; cap_mkdb /etc/login.conf + + + Resource limits differ from the default login capabilities + in two ways. First, for every limit, there is a + soft and hard + limit. A soft limit may be adjusted by the user or + application, but may not be set higher than the hard limit. + The hard limit may be lowered by the user, but can only be + raised by the superuser. Second, most resource limits apply + per process to a specific user. + + lists the most commonly + used resource limits. All of the available resource limits + and capabilities are described in detail in + &man.login.conf.5;. + + + limiting users + coredumpsize + + + limiting users + cputime + + + limiting users + filesize + + + limiting users + maxproc + + + limiting users + memorylocked + + + limiting users + memoryuse + + + limiting users + openfiles + + + limiting users + sbsize + + + limiting users + stacksize + + + + Login Class Resource Limits + + + + + Resource Limit + Description + + + + + + coredumpsize + The limit on the size of a core file generated by + a program is subordinate to other limits on disk + usage, such as filesize or disk + quotas. This limit is often used as a less severe + method of controlling disk space consumption. Since + users do not generate core files and often do not + delete them, this setting may save them from running + out of disk space should a large program + crash. + + + + cputime + The maximum amount of CPU time + a user's process may consume. Offending processes + will be killed by the kernel. This is a limit on + CPU time + consumed, not the percentage of the + CPU as displayed in some of the + fields generated by top and + ps. + + + + filesize + The maximum size of a file the user may own. + Unlike disk quotas (), this + limit is enforced on individual files, not the set of + all files a user owns. + + + + maxproc + The maximum number of foreground and background + processes a user can run. This limit may not be + larger than the system limit specified by + kern.maxproc. Setting this limit + too small may hinder a user's productivity as some + tasks, such as compiling a large program, start lots + of processes. + + + + memorylocked + The maximum amount of memory a process may + request to be locked into main memory using + &man.mlock.2;. Some system-critical programs, such as + &man.amd.8;, lock into main memory so that if the + system begins to swap, they do not contribute to disk + thrashing. + + + + memoryuse + The maximum amount of memory a process may + consume at any given time. It includes both core + memory and swap usage. This is not a catch-all limit + for restricting memory consumption, but is a good + start. + + + + openfiles + The maximum number of files a process may have + open. 
In &os;, files are used to represent sockets + and IPC channels, so be careful not + to set this too low. The system-wide limit for this + is defined by + kern.maxfiles. + + + + sbsize + The limit on the amount of network memory a user + may consume. This can be generally used to limit + network communications. + + + + stacksize + The maximum size of a process stack. This alone + is not sufficient to limit the amount of memory a + program may use, so it should be used in conjunction + with other limits. + + + +
+ + There are a few other things to remember when setting + resource limits: + + + + Processes started at system startup by + /etc/rc are assigned to the + daemon login class. + + + + Although the default + /etc/login.conf is a good source of + reasonable values for most limits, they may not be + appropriate for every system. Setting a limit too high + may open the system up to abuse, while setting it too low + may put a strain on productivity. + + + + &xorg; takes a lot of + resources and encourages users to run more programs + simultaneously. + + + + Many limits apply to individual processes, not the + user as a whole. For example, setting + openfiles to 50 + means that each process the user runs may open up to + 50 files. The total amount of files a + user may open is the value of openfiles + multiplied by the value of maxproc. + This also applies to memory consumption. + + + + For further information on resource limits and login + classes and capabilities in general, refer to + &man.cap.mkdb.1;, &man.getrlimit.2;, and + &man.login.conf.5;. +
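To make the multi-step process described at the beginning of this section more concrete, here is a hedged sketch that defines a hypothetical login class named limited, rebuilds the capability database, and assigns the class to a user; the class name, the limit values, and the user name trhodes are illustrative only. First, an entry such as the following is added to /etc/login.conf:

limited:\
	:maxproc=64:\
	:openfiles=512:\
	:cputime=1h30m:\
	:tc=default:

Then the database and the user's account record are updated:

# cap_mkdb /etc/login.conf
# pw usermod trhodes -L limited

Because pw rewrites /etc/master.passwd and rebuilds the password database itself, no further steps are needed, and the new limits take effect at the user's next login.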
+ + + Enabling and Configuring Resource Limits + + By default, kernel support for rctl is + not built-in, meaning that the kernel will first need to be + recompiled using the instructions in . Add these lines to either + GENERIC or a custom kernel configuration + file, then rebuild the kernel: + + options RACCT +options RCTL + + Once the system has rebooted into the new kernel, + rctl may be used to set rules for the + system. + + Rule syntax is controlled through the use of a subject, + subject-id, resource, and action, as seen in this example + rule: + + user:trhodes:maxproc:deny=10/user + + In this rule, the subject is user, the + subject-id is trhodes, the resource, + maxproc, is the maximum number of + processes, and the action is deny, which + blocks any new processes from being created. This means that + the user, trhodes, will be constrained to + no greater than 10 processes. Other + possible actions include logging to the console, passing a + notification to &man.devd.8;, or sending a sigterm to the + process. + + Some care must be taken when adding rules. Since this + user is constrained to 10 processes, this + example will prevent the user from performing other tasks + after logging in and executing a + screen session. Once a resource limit has + been hit, an error will be printed, as in this example: + + &prompt.user; man test + /usr/bin/man: Cannot fork: Resource temporarily unavailable +eval: Cannot fork: Resource temporarily unavailable + + As another example, a jail can be prevented from exceeding + a memory limit. This rule could be written as: + + &prompt.root; rctl -a jail:httpd:memoryuse:deny=2G/jail + + Rules will persist across reboots if they have been added + to /etc/rctl.conf. The format is a rule, + without the preceding command. For example, the previous rule + could be added as: + + # Block jail from using more than 2G memory: +jail:httpd:memoryuse:deny=2G/jail + + To remove a rule, use rctl to remove it + from the list: + + &prompt.root; rctl -r user:trhodes:maxproc:deny=10/user + + A method for removing all rules is documented in + &man.rctl.8;. However, if removing all rules for a single + user is required, this command may be issued: + + &prompt.root; rctl -r user:trhodes + + Many other resources exist which can be used to exert + additional control over various subjects. + See &man.rctl.8; to learn about them.
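Once rules are in place, they can be reviewed and the current consumption of a subject inspected with the same utility. This is a hedged sketch reusing the user from the examples above:

# rctl
# rctl -u user:trhodes

Running rctl without arguments lists the rules currently in effect, optionally narrowed by a filter, while -u displays the current resource utilization of the named subject.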
Index: zh_TW.UTF-8/books/handbook/serialcomms/chapter.xml =================================================================== --- zh_TW.UTF-8/books/handbook/serialcomms/chapter.xml +++ zh_TW.UTF-8/books/handbook/serialcomms/chapter.xml @@ -2666,7 +2666,7 @@ - Get the kernel source. (See ) + Get the kernel source. Index: zh_TW.UTF-8/share/xml/mailing-lists.ent =================================================================== --- zh_TW.UTF-8/share/xml/mailing-lists.ent +++ zh_TW.UTF-8/share/xml/mailing-lists.ent @@ -3,7 +3,7 @@ Names of FreeBSD 郵遞論壇s and related software. The FreeBSD Traditional-Chinese Documentation Project - Original revision: 1.57 + Original revision: r46084 $FreeBSD$ --> @@ -27,10 +27,6 @@ FreeBSD Adaptec AIC7xxx discussions 郵遞論壇"> freebsd-aic7xxx"> - -FreeBSD Alpha porting 郵遞論壇"> -freebsd-alpha"> - Porting FreeBSD to AMD64 systems"> freebsd-amd64"> @@ -55,14 +51,6 @@ FreeBSD ATM networking 郵遞論壇"> freebsd-atm"> - -FreeBSD source code audit 郵遞論壇"> -freebsd-audit"> - - -FreeBSD binary update system 郵遞論壇"> -freebsd-binup"> - FreeBSD Bluetooth 郵遞論壇"> freebsd-bluetooth"> @@ -79,6 +67,10 @@ FreeBSD chat 郵遞論壇"> freebsd-chat"> + +FreeBSD-specific Chromium issues"> +freebsd-chromium"> + FreeBSD clustering 郵遞論壇"> freebsd-cluster"> @@ -86,7 +78,6 @@ - @@ -97,14 +88,34 @@ CTM 公告"> ctm-announce"> - -透過 CTM 發佈的 CVS 檔案"> -ctm-cvs-cur"> - CTM 4-STABLE src branch distribution 郵遞論壇"> ctm-src-4"> + +CTM 5-STABLE src branch distribution mailing list"> +ctm-src-5"> + + +CTM 6-STABLE src branch distribution mailing list"> +ctm-src-6"> + + +CTM 7-STABLE src branch distribution mailing list"> +ctm-src-7"> + + +CTM 8-STABLE src branch distribution mailing list"> +ctm-src-8"> + + +CTM 9-STABLE src branch distribution mailing list"> +ctm-src-9"> + + +CTM 10-STABLE src branch distribution mailing list"> +ctm-src-10"> + CTM -CURRENT src branch distribution 郵遞論壇"> ctm-src-cur"> @@ -133,15 +144,15 @@ FreeBSD CVS src commit list"> cvs-src"> - -FreeBSD CVSweb 維護郵遞論壇"> -freebsd-cvsweb"> - FreeBSD based Databases 郵遞論壇"> freebsd-database"> +Using and improving &os; on the desktop"> +freebsd-desktop"> + + @@ -158,10 +169,13 @@ Writing device drivers for FreeBSD"> freebsd-drivers"> + +Using and working on DTrace in &os;."> +freebsd-dtrace"> + FreeBSD users of Eclipse IDE, tools, rich client applications and ports"> freebsd-eclipse"> - FreeBSD-embedded 郵遞論壇"> freebsd-embedded"> @@ -170,6 +184,10 @@ FreeBSD-emulation 郵遞論壇"> freebsd-emulation"> + +FreeBSD-enlightenment mailing list"> +freebsd-enlightenment"> + FreeBSD-eol 郵遞論壇"> freebsd-eol"> @@ -178,14 +196,30 @@ FreeBSD FireWire (IEEE 1394) discussion 郵遞論壇"> freebsd-firewire"> + +Fortran on FreeBSD mailing list"> +freebsd-fortran"> + FreeBSD file system project 郵遞論壇"> freebsd-fs"> + +Games on FreeBSD mailing list"> +freebsd-games"> + + +FreeBSD gecko mailing list"> +freebsd-gecko"> + FreeBSD GEOM 郵遞論壇"> freebsd-geom"> + +Discussion of git use in the FreeBSD project"> +freebsd-git"> + FreeBSD GNOME and GNOME applications 郵遞論壇"> freebsd-gnome"> @@ -218,6 +252,10 @@ FreeBSD IA64 porting 郵遞論壇"> freebsd-ia64"> + +Infiniband on FreeBSD"> +freebsd-infiniband"> + FreeBSD IPFW code 郵遞論壇"> freebsd-ipfw"> @@ -250,10 +288,6 @@ FreeBSD LFS porting 郵遞論壇"> freebsd-lfs"> - -FreeBSD libh installation and packaging system 郵遞論壇"> -freebsd-libh"> - FreeBSD MIPS porting 郵遞論壇"> freebsd-mips"> @@ -286,9 +320,17 @@ FreeBSD new-bus 郵遞論壇"> freebsd-new-bus"> - -FreeBSD OpenOffice 郵遞論壇"> -freebsd-openoffice"> + +Discussions of high quality implementation of libm functions"> 
+freebsd-numerics"> + + +Office applications on FreeBSD"> +freebsd-office"> + + +Project Infrastructure Announcements"> +freebsd-ops-announce"> FreeBSD performance 郵遞論壇"> @@ -302,18 +344,25 @@ FreeBSD packet filter 郵遞論壇"> freebsd-pf"> + +Binary package management and package tools discussion"> +freebsd-pkg"> + + +Fallout logs from package building"> +freebsd-pkg-fallout"> + FreeBSD non-Intel platforms porting 郵遞論壇"> freebsd-platforms"> - - -FreeBSD core team policy decisions 郵遞論壇"> -freebsd-policy"> - FreeBSD ports 郵遞論壇"> freebsd-ports"> + +FreeBSD ports announce mailing list"> +freebsd-ports-announce"> + FreeBSD ports bugs 郵遞論壇"> freebsd-ports-bugs"> @@ -336,10 +385,6 @@ FreeBSD Python 郵遞論壇"> freebsd-python"> - -FreeBSD Quality Assurance 郵遞論壇"> -freebsd-qa"> - FreeBSD general questions 郵遞論壇"> freebsd-questions"> @@ -352,6 +397,10 @@ FreeBSD realtime extensions 郵遞論壇"> freebsd-realtime"> + +FreeBSD Ruby mailing list"> +freebsd-ruby"> + FreeBSD SCSI subsystem 郵遞論壇"> freebsd-scsi"> @@ -368,9 +417,9 @@ FreeBSD-small 郵遞論壇"> freebsd-small"> - -FreeBSD symmetric multiprocessing 郵遞論壇"> -freebsd-smp"> + +FreeBSD Development Snapshot Announcements"> +freebsd-snapshots"> FreeBSD SPARC porting 郵遞論壇"> @@ -394,6 +443,102 @@ FreeBSD sun4v porting 郵遞論壇"> freebsd-sun4v"> + +SVN commit messages for the entire doc tree (except for user, projects and translations)"> +svn-doc-all"> + + +SVN commit messages for the doc tree for head/"> +svn-doc-head"> + + +SVN commit messages for the doc projects tree"> +svn-doc-projects"> + + +SVN commit messages for the doc admin / configuration tree"> +svn-doc-svnadmin"> + + +SVN commit messages for the entire ports tree"> +svn-ports-all"> + + +SVN commit messages for the ports tree for head/"> +svn-ports-head"> + + +SVN commit messages for the ports admin / configuration tree"> +svn-ports-svnadmin"> + + +SVN commit messages for the entire src tree (except for user and projects)"> +svn-src-all"> + + +SVN commit messages for the src tree for head/-current"> +svn-src-head"> + + +SVN commit messages for the src projects tree"> +svn-src-projects"> + + +SVN commit messages for releases in the src tree"> +svn-src-release"> + + +SVN commit messages for the release engineering / security commits to the src tree"> +svn-src-releng"> + + +SVN commit messages for all the -stable branches of the src tree"> +svn-src-stable"> + + +SVN commit messages for only the 6-stable src tree"> +svn-src-stable-6"> + + +SVN commit messages for only the 7-stable src tree"> +svn-src-stable-7"> + + +SVN commit messages for only the 8-stable src tree"> +svn-src-stable-8"> + + +SVN commit messages for only the 9-stable src tree"> +svn-src-stable-9"> + + +SVN commit messages for only the 10-stable src tree"> +svn-src-stable-10"> + + +SVN commit messages for the old stable src trees"> +svn-src-stable-other"> + + +SVN commit messages for the admin / configuration tree"> +svn-src-svnadmin"> + + +SVN commit messages for the experimental user src tree"> +svn-src-user"> + + +SVN commit messages for the vendor work area tree"> +svn-src-vendor"> + + +Sysinstall development mailing list"> +freebsd-sysinstall"> + + +FreeBSD-specific Tcl/Tk discussions"> +freebsd-tcltk"> + FreeBSD test 郵遞論壇"> freebsd-test"> @@ -402,14 +547,30 @@ FreeBSD performance and stability testing 郵遞論壇"> freebsd-testing"> + +Porting TeX and its applications to &os;"> +freebsd-tex"> + FreeBSD threads 郵遞論壇"> freebsd-threads"> + +Porting FreeBSD to the Tilera family of CPUs"> +freebsd-tilera"> + FreeBSD tokenring 郵遞論壇"> freebsd-tokenring"> + 
+FreeBSD integrated toolchain mailing list"> +freebsd-toolchain"> + + +FreeBSD translators mailing list"> +freebsd-translators"> + FreeBSD USB 郵遞論壇"> freebsd-usb"> @@ -422,11 +583,23 @@ FreeBSD vendors pre-release coordination 郵遞論壇"> freebsd-vendors"> + +Discussion of various virtualization techniques supported by FreeBSD"> +freebsd-virtualization"> + Discussion on the VuXML infrastructure"> freebsd-vuxml"> + +FreeBSD Work-In-Progress Status"> +freebsd-wip-status"> + + +Discussions of 802.11 stack, tools, device driver development"> +freebsd-wireless"> + FreeBSD Webmaster 郵遞論壇"> freebsd-www"> @@ -435,9 +608,38 @@ FreeBSD X11 郵遞論壇"> freebsd-x11"> + +FreeBSD port to Xen mailing list"> +freebsd-xen"> + + +XFCE for FreeBSD mailing list"> +freebsd-xfce"> + + +Zope for FreeBSD mailing list"> +freebsd-zope"> + bug-followup@FreeBSD.org"> majordomo@FreeBSD.org"> + + + + +FreeBSD Alpha porting mailing list"> +freebsd-alpha"> + + +FreeBSD Quality Assurance mailing list"> +freebsd-qa"> + + +FreeBSD symmetric multiprocessing mailing list"> +freebsd-smp">