D9614.diff

Index: sbin/Makefile
===================================================================
--- sbin/Makefile
+++ sbin/Makefile
@@ -49,6 +49,7 @@
mount_nfs \
mount_nullfs \
mount_udf \
+ mount_udf2 \
mount_unionfs \
newfs \
newfs_msdos \
Index: sbin/mount_udf2/Makefile
===================================================================
--- /dev/null
+++ sbin/mount_udf2/Makefile
@@ -0,0 +1,19 @@
+# $FreeBSD$
+
+PROG= mount_udf2
+SRCS= mount_udf.c getmntopts.c
+MAN= mount_udf2.8
+LIBADD+=kiconv
+
+MOUNT= ${.CURDIR}/../mount
+CFLAGS+= -I${MOUNT} -I${.CURDIR}/../../sys
+.PATH: ${MOUNT}
+WARNS?= 1
+
+# Needs to be dynamically linked for optional dlopen() access to
+# userland libiconv
+NO_SHARED?= NO
+
+BINDIR=/sbin
+
+.include <bsd.prog.mk>
Index: sbin/mount_udf2/getmntopts.c
===================================================================
--- /dev/null
+++ sbin/mount_udf2/getmntopts.c
@@ -0,0 +1,182 @@
+/*-
+ * Copyright (c) 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if 0
+#ifndef lint
+static char sccsid[] = "@(#)getmntopts.c 8.3 (Berkeley) 3/29/95";
+#endif /* not lint */
+#endif
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/uio.h>
+
+#include <err.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sysexits.h>
+
+#include "mntopts.h"
+
+int getmnt_silent = 0;
+
+void
+getmntopts(const char *options, const struct mntopt *m0, int *flagp,
+ int *altflagp)
+{
+ const struct mntopt *m;
+ int negative, len;
+ char *opt, *optbuf, *p;
+ int *thisflagp;
+
+ /* Copy option string, since it is about to be torn asunder... */
+ if ((optbuf = strdup(options)) == NULL)
+ err(1, NULL);
+
+ for (opt = optbuf; (opt = strtok(opt, ",")) != NULL; opt = NULL) {
+ /* Check for "no" prefix. */
+ if (opt[0] == 'n' && opt[1] == 'o') {
+ negative = 1;
+ opt += 2;
+ } else
+ negative = 0;
+
+ /*
+ * for options with assignments in them (ie. quotas)
+ * ignore the assignment as it's handled elsewhere
+ */
+ p = strchr(opt, '=');
+ if (p != NULL)
+ *++p = '\0';
+
+ /* Scan option table. */
+ for (m = m0; m->m_option != NULL; ++m) {
+ len = strlen(m->m_option);
+ if (strncasecmp(opt, m->m_option, len) == 0)
+ if (opt[len] == '\0' || opt[len] == '=')
+ break;
+ }
+
+ /* Save flag, or fail if option is not recognized. */
+ if (m->m_option) {
+ thisflagp = m->m_altloc ? altflagp : flagp;
+ if (negative == m->m_inverse)
+ *thisflagp |= m->m_flag;
+ else
+ *thisflagp &= ~m->m_flag;
+ } else if (!getmnt_silent) {
+ errx(1, "-o %s: option not supported", opt);
+ }
+ }
+
+ free(optbuf);
+}
+
+void
+rmslashes(char *rrpin, char *rrpout)
+{
+ char *rrpoutstart;
+
+ *rrpout = *rrpin;
+ for (rrpoutstart = rrpout; *rrpin != '\0'; *rrpout++ = *rrpin++) {
+
+ /* skip all double slashes */
+ while (*rrpin == '/' && *(rrpin + 1) == '/')
+ rrpin++;
+ }
+
+ /* remove trailing slash if necessary */
+ if (rrpout - rrpoutstart > 1 && *(rrpout - 1) == '/')
+ *(rrpout - 1) = '\0';
+ else
+ *rrpout = '\0';
+}
+
+void
+checkpath(const char *path, char *resolved)
+{
+ struct stat sb;
+
+ if (realpath(path, resolved) != NULL && stat(resolved, &sb) == 0) {
+ if (!S_ISDIR(sb.st_mode))
+ errx(EX_USAGE, "%s: not a directory", resolved);
+ } else
+ errx(EX_USAGE, "%s: %s", resolved, strerror(errno));
+}
+
+void
+build_iovec(struct iovec **iov, int *iovlen, const char *name, void *val,
+ size_t len)
+{
+ int i;
+
+ if (*iovlen < 0)
+ return;
+ i = *iovlen;
+ *iov = realloc(*iov, sizeof **iov * (i + 2));
+ if (*iov == NULL) {
+ *iovlen = -1;
+ return;
+ }
+ (*iov)[i].iov_base = strdup(name);
+ (*iov)[i].iov_len = strlen(name) + 1;
+ i++;
+ (*iov)[i].iov_base = val;
+ if (len == (size_t)-1) {
+ if (val != NULL)
+ len = strlen(val) + 1;
+ else
+ len = 0;
+ }
+ (*iov)[i].iov_len = (int)len;
+ *iovlen = ++i;
+}
+
+/*
+ * This function is needed for compatibility with parameters
+ * which used to use the mount_argf() command for the old mount() syscall.
+ */
+void
+build_iovec_argf(struct iovec **iov, int *iovlen, const char *name,
+ const char *fmt, ...)
+{
+ va_list ap;
+ char val[255] = { 0 };
+
+ va_start(ap, fmt);
+ vsnprintf(val, sizeof(val), fmt, ap);
+ va_end(ap);
+ build_iovec(iov, iovlen, name, strdup(val), (size_t)-1);
+}
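
For reference, the helpers in this file follow the stock FreeBSD mount
front-end pattern: parse -o options with getmntopts(), then hand name/value
pairs to nmount(2).  Below is a minimal sketch of that flow, assuming a
hypothetical wrapper function and a trimmed option table; mount_udf.c further
down in this diff builds the same name/value pairs (with a static iovec array)
for the full UDF argument set.

	#include <sys/param.h>
	#include <sys/mount.h>
	#include <sys/uio.h>

	#include <err.h>

	#include "mntopts.h"

	static struct mntopt example_opts[] = {
		MOPT_STDOPTS,
		MOPT_END
	};

	/* Hypothetical front end: mount `dev' on `dir' using the helpers above. */
	static void
	example_mount(char *dev, const char *dir, const char *options)
	{
		struct iovec *iov = NULL;
		char mntpath[MAXPATHLEN];
		int iovlen = 0, mntflags = 0, altflags = 0;

		getmntopts(options, example_opts, &mntflags, &altflags);
		rmslashes(dev, dev);		/* drop doubled/trailing slashes */
		checkpath(dir, mntpath);	/* exits unless dir is a directory */

		build_iovec(&iov, &iovlen, "fstype", __DECONST(void *, "udf2"),
		    (size_t)-1);
		build_iovec(&iov, &iovlen, "fspath", mntpath, (size_t)-1);
		build_iovec(&iov, &iovlen, "from", dev, (size_t)-1);
		if (nmount(iov, iovlen, mntflags) < 0)
			err(1, "%s", dev);
	}
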
Index: sbin/mount_udf2/mntopts.h
===================================================================
--- /dev/null
+++ sbin/mount_udf2/mntopts.h
@@ -0,0 +1,99 @@
+/*-
+ * Copyright (c) 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)mntopts.h 8.7 (Berkeley) 3/29/95
+ * $FreeBSD$
+ */
+
+struct mntopt {
+ const char *m_option; /* option name */
+ int m_inverse; /* if a negative option, e.g. "atime" */
+ int m_flag; /* bit to set, e.g. MNT_RDONLY */
+ int m_altloc; /* 1 => set bit in altflags */
+};
+
+/* User-visible MNT_ flags. */
+#define MOPT_ASYNC { "async", 0, MNT_ASYNC, 0 }
+#define MOPT_NOATIME { "atime", 1, MNT_NOATIME, 0 }
+#define MOPT_NOEXEC { "exec", 1, MNT_NOEXEC, 0 }
+#define MOPT_NOSUID { "suid", 1, MNT_NOSUID, 0 }
+#define MOPT_NOSYMFOLLOW { "symfollow", 1, MNT_NOSYMFOLLOW, 0 }
+#define MOPT_RDONLY { "rdonly", 0, MNT_RDONLY, 0 }
+#define MOPT_SYNC { "sync", 0, MNT_SYNCHRONOUS, 0 }
+#define MOPT_UNION { "union", 0, MNT_UNION, 0 }
+#define MOPT_USERQUOTA { "userquota", 0, 0, 0 }
+#define MOPT_GROUPQUOTA { "groupquota", 0, 0, 0 }
+#define MOPT_NOCLUSTERR { "clusterr", 1, MNT_NOCLUSTERR, 0 }
+#define MOPT_NOCLUSTERW { "clusterw", 1, MNT_NOCLUSTERW, 0 }
+#define MOPT_SUIDDIR { "suiddir", 0, MNT_SUIDDIR, 0 }
+#define MOPT_SNAPSHOT { "snapshot", 0, MNT_SNAPSHOT, 0 }
+#define MOPT_MULTILABEL { "multilabel", 0, MNT_MULTILABEL, 0 }
+#define MOPT_ACLS { "acls", 0, MNT_ACLS, 0 }
+#define MOPT_NFS4ACLS { "nfsv4acls", 0, MNT_NFS4ACLS, 0 }
+
+/* Control flags. */
+#define MOPT_FORCE { "force", 0, MNT_FORCE, 0 }
+#define MOPT_UPDATE { "update", 0, MNT_UPDATE, 0 }
+#define MOPT_RO { "ro", 0, MNT_RDONLY, 0 }
+#define MOPT_RW { "rw", 1, MNT_RDONLY, 0 }
+
+/* This is parsed by mount(8), but is ignored by specific mount_*(8)s. */
+#define MOPT_AUTO { "auto", 0, 0, 0 }
+
+/* A handy macro as terminator of MNT_ array. */
+#define MOPT_END { NULL, 0, 0, 0 }
+
+#define MOPT_FSTAB_COMPAT \
+ MOPT_RO, \
+ MOPT_RW, \
+ MOPT_AUTO
+
+/* Standard options which all mounts can understand. */
+#define MOPT_STDOPTS \
+ MOPT_USERQUOTA, \
+ MOPT_GROUPQUOTA, \
+ MOPT_FSTAB_COMPAT, \
+ MOPT_NOATIME, \
+ MOPT_NOEXEC, \
+ MOPT_SUIDDIR, /* must be before MOPT_NOSUID */ \
+ MOPT_NOSUID, \
+ MOPT_NOSYMFOLLOW, \
+ MOPT_RDONLY, \
+ MOPT_UNION, \
+ MOPT_NOCLUSTERR, \
+ MOPT_NOCLUSTERW, \
+ MOPT_MULTILABEL, \
+ MOPT_ACLS, \
+ MOPT_NFS4ACLS
+
+void getmntopts(const char *, const struct mntopt *, int *, int *);
+void rmslashes(char *, char *);
+void checkpath(const char *, char resolved_path[]);
+extern int getmnt_silent;
+void build_iovec(struct iovec **iov, int *iovlen, const char *name, void *val, size_t len);
+void build_iovec_argf(struct iovec **iov, int *iovlen, const char *name, const char *fmt, ...);
Index: sbin/mount_udf2/mount_udf.c
===================================================================
--- /dev/null
+++ sbin/mount_udf2/mount_udf.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 1992, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 2002 Scott Long
+ * Copyright (c) 2012 Oleksandr Dudinskyi
+ *
+ * This code is derived from software contributed to Berkeley
+ * by Pace Willisson (pace@blitz.com). The Rock Ridge Extension
+ * Support code is derived from software contributed to Berkeley
+ * by Atsushi Murai (amurai@spec.co.jp).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * This started as a rip-off of mount_iso9660.c.  It has since grown a few
+ * UDF-specific options (-C, -s, -c, -g and -u) but remains deliberately simple.
+ */
+
+#include <sys/cdio.h>
+#include <sys/file.h>
+#include <sys/iconv.h>
+#include <sys/param.h>
+#include <sys/linker.h>
+#include <sys/module.h>
+#include <sys/mount.h>
+#include <sys/uio.h>
+#include <sys/endian.h>
+#include <sys/ioctl.h>
+
+#include "fs/udf2/udf_mount.h"
+
+#include <err.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sysexits.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "mntopts.h"
+
+
+struct mntopt mopts[] = {
+ MOPT_STDOPTS,
+ MOPT_UPDATE,
+ MOPT_END
+};
+
+static int set_charset(char **, char **, const char *);
+static void usage(void);
+
+int
+main(int argc, char **argv)
+{
+ struct udf_args args;
+ struct iovec iov[14];
+ int ch, i, mntflags, opts, udf_flags, verbose;
+ char *dev, *dir, mntpath[MAXPATHLEN], *cs_disk, *cs_local, *endp;
+
+ /* read in disk info from options */
+ args.anon_uid = 0;
+ args.anon_gid = 0;
+ args.nobody_uid = -1;
+ args.nobody_gid = -1;
+
+ i = mntflags = opts = udf_flags = verbose = 0;
+ cs_disk = cs_local = NULL;
+ while ((ch = getopt(argc, argv, "o:vC:s:c:g:u:")) != -1)
+ switch (ch) {
+ case 'c':
+ args.udfmflags |= UDFMNT_CLOSESESSION;
+ break;
+ case 'g':
+ args.anon_gid = strtol(optarg, &endp, 10);
+ if (optarg == endp || *endp != '\0')
+ usage();
+ break;
+ case 'u':
+ args.anon_uid = strtol(optarg, &endp, 10);
+ if (optarg == endp || *endp != '\0')
+ usage();
+ break;
+ case 'o':
+ getmntopts(optarg, mopts, &mntflags, &opts);
+ break;
+ case 'v':
+ verbose++;
+ break;
+ case 'C':
+ if (set_charset(&cs_disk, &cs_local, optarg) == -1)
+ err(EX_OSERR, "udf2_iconv");
+ udf_flags |= UDFMNT_KICONV;
+ break;
+ case 's':
+ args.sessionnr = strtol(optarg, &endp, 10);
+ if (optarg == endp || *endp != '\0')
+ usage();
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 2)
+ usage();
+
+ dev = argv[0];
+ dir = argv[1];
+
+
+ /*
+ * Resolve the mountpoint with realpath(3) and remove unnecessary
+ * slashes from the devicename if there are any.
+ */
+ (void)checkpath(dir, mntpath);
+ (void)rmslashes(dev, dev);
+
+ /*
+ * UDF file systems are not writeable.
+ */
+ mntflags |= MNT_RDONLY;
+
+ iov[i].iov_base = "fstype";
+ iov[i++].iov_len = sizeof("fstype");
+ iov[i].iov_base = "udf2";
+ iov[i].iov_len = strlen(iov[i].iov_base) + 1;
+ i++;
+
+ iov[i].iov_base = "fspath";
+ iov[i++].iov_len = sizeof("fspath");
+ iov[i].iov_base = mntpath;
+ iov[i++].iov_len = strlen(mntpath) + 1;
+
+ iov[i].iov_base = "from";
+ iov[i++].iov_len = sizeof("from");
+ iov[i].iov_base = dev;
+ iov[i++].iov_len = strlen(dev) + 1;
+
+ iov[i].iov_base = "flags";
+ iov[i++].iov_len = sizeof("flags");
+ iov[i].iov_base = &udf_flags;
+ iov[i++].iov_len = sizeof(udf_flags);
+
+ iov[i].iov_base = "udf_args";
+ iov[i++].iov_len = sizeof("udf_args");
+ iov[i].iov_base = &args;
+ iov[i++].iov_len = sizeof(args);
+
+ if (udf_flags & UDFMNT_KICONV) {
+ iov[i].iov_base = "cs_disk";
+ iov[i++].iov_len = sizeof("cs_disk");
+ iov[i].iov_base = cs_disk;
+ iov[i++].iov_len = strlen(cs_disk) + 1;
+ iov[i].iov_base = "cs_local";
+ iov[i++].iov_len = sizeof("cs_local");
+ iov[i].iov_base = cs_local;
+ iov[i++].iov_len = strlen(cs_local) + 1;
+ }
+ if (nmount(iov, i, mntflags) < 0)
+ err(1, "%s", dev);
+ exit(0);
+}
+
+static int
+set_charset(char **cs_disk, char **cs_local, const char *localcs)
+{
+ int error;
+
+ if (modfind("udf2_iconv") < 0)
+ if (kldload("udf2_iconv") < 0 || modfind("udf2_iconv") < 0) {
+ warnx( "cannot find or load \"udf2_iconv\" kernel module");
+ return (-1);
+ }
+
+ if ((*cs_disk = malloc(ICONV_CSNMAXLEN)) == NULL)
+ return (-1);
+ if ((*cs_local = malloc(ICONV_CSNMAXLEN)) == NULL)
+ return (-1);
+ strncpy(*cs_disk, ENCODING_UNICODE, ICONV_CSNMAXLEN);
+ strncpy(*cs_local, localcs, ICONV_CSNMAXLEN);
+ error = kiconv_add_xlat16_cspairs(*cs_disk, *cs_local);
+ if (error)
+ return (-1);
+
+ return (0);
+}
+
+static void
+usage(void)
+{
+ (void)fprintf(stderr,
+ "usage: mount_udf2 [-v] [-o options] [-C charset] [-s session] "
+ "[-g gid] [-u uid] special node\n");
+ exit(EX_USAGE);
+}
Index: sbin/mount_udf2/mount_udf2.8
===================================================================
--- /dev/null
+++ sbin/mount_udf2/mount_udf2.8
@@ -0,0 +1,115 @@
+.\" Copyright (c) 2002
+.\" Scott Long <scottl@FreeBSD.org>
+.\" Jeroen Ruigrok van der Werven <asmodai@wxs.nl>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd March 23, 2002
+.Dt MOUNT_UDF2 8
+.Os
+.Sh NAME
+.Nm mount_udf2
+.Nd mount a UDF file system
+.Sh SYNOPSIS
+.Nm
+.Op Fl v
+.Op Fl c
+.Op Fl g Ar gid
+.Op Fl o Ar options
+.Op Fl C Ar charset
+.Op Fl s Ar session
+.Op Fl u Ar uid
+.Ar special node
+.Sh DESCRIPTION
+The
+.Nm
+utility attaches the UDF file system residing on the device
+.Ar special
+to the global file system namespace at the location indicated by
+.Ar node .
+.Pp
+The options are as follows:
+.Bl -tag -width indent
+.It Fl c
+Close the session after unmounting, creating remountable snapshots.
+Closing a session also allows read-only (-ROM) drives to read the created disc.
+Note that this option only makes sense when mounting sequential
+recordable media such as CD-R and DVD*R.
+.It Fl g Ar gid
+Set the group of anonymous files on the file system.
+The default group is the nobody group.
+.It Fl o Ar options
+Options are specified with a
+.Fl o
+flag followed by a comma-separated string of options.
+See the
+.Xr mount 8
+man page for possible options and their meanings.
+.It Fl v
+Be verbose about mounting the UDF file system.
+.It Fl C Ar charset
+Specify local
+.Ar charset
+to convert Unicode file names.
+.It Fl s Ar session
+Select the session
+.Ar session
+to be mounted instead of the last session, which is the default.
+This implements read-only snapshots on sequential media.
+Positive
+.Ar session
+values indicate an absolute session number.
+Negative
+.Ar session
+values are relative to the last session found on the disc.
+Note that this option only makes sense when mounting sequential
+recordable media like CD-R and DVD*R.
+.It Fl u Ar uid
+Set the owner of anonymous files on the file system.
+The default owner is the user nobody.
+.El
+.Sh SEE ALSO
+.Xr cdcontrol 1 ,
+.Xr mount 2 ,
+.Xr unmount 2 ,
+.Xr fstab 5 ,
+.Xr mount 8
+.Sh NOTES
+UDF is a file system defined by the OSTA standardization group and
+is tailored for data interchange on optical discs (like CDs and
+DVDs) between different operating systems.
+It is also increasingly common on other media such as Compact
+Flash (CF) cards.
+.Pp
+Read-only access is supported for all media types that CD/DVD/BD type drives
+can recognise, including DVD-RAM.
+.Pp
+All UDF versions up to and including 2.60 are supported.
+.Sh HISTORY
+The
+.Nm
+utility first appeared in
+.Fx 5.0 .
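
Usage note (example only; the device name and charset below are illustrative
and not taken from the patch): with the options described above, a plain
read-only mount is "mount_udf2 /dev/cd0 /mnt", and adding a local charset for
Unicode file name conversion looks like "mount_udf2 -C KOI8-R /dev/cd0 /mnt".
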
Index: sys/fs/udf2/Makefile
===================================================================
--- /dev/null
+++ sys/fs/udf2/Makefile
@@ -0,0 +1,13 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}
+
+KMOD= udf2
+
+SRCS= udf_readwrite.c udf_subr.c udf_allocation.c \
+ udf_osta.c udf_vfsops.c udf_vnops.c udf_filenames.c
+# udf_strat_bootstrap.c udf_strat_direct.c udf_strat_rmw.c udf_strat_sequential.c
+SRCS+= vnode_if.h
+EXPORT_SYMS= udf_iconv
+
+.include <bsd.kmod.mk>
Index: sys/fs/udf2/ecma167-udf.h
===================================================================
--- /dev/null
+++ sys/fs/udf2/ecma167-udf.h
@@ -0,0 +1,835 @@
+/* $NetBSD: ecma167-udf.h,v 1.14 2011/07/07 17:45:38 reinoud Exp $ */
+
+/*-
+ * Copyright (c) 2003, 2004, 2005, 2006, 2008, 2009
+ * Reinoud Zandijk <reinoud@NetBSD.org>
+ * Copyright (c) 2001, 2002 Scott Long <scottl@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *
+ * Extended and adapted for UDFv2.50+ by Reinoud Zandijk based on the
+ * original by Scott Long.
+ *
+ * 20030508 Made some small typo and explanatory comments
+ * 20030510 Added UDF 2.01 structures
+ * 20030519 Added/correct comments on multi-partitioned logical volume space
+ * 20050616 Added pseudo overwrite
+ * 20050624 Added the missing extended attribute types and `magic values'.
+ * 20051106 Reworked some implementation use parts
+ *
+ */
+
+
+#ifndef _FS_UDF_ECMA167_UDF_H_
+#define _FS_UDF_ECMA167_UDF_H_
+
+
+/*
+ * in case of older gcc versions, define __packed as an explicit
+ * attribute
+ */
+
+/*
+ * You may specify the `aligned' and `transparent_union' attributes either in
+ * a `typedef' declaration or just past the closing curly brace of a complete
+ * enum, struct or union type _definition_ and the `packed' attribute only
+ * past the closing brace of a definition. You may also specify attributes
+ * between the enum, struct or union tag and the name of the type rather than
+ * after the closing brace.
+*/
+
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
+
+
+/* ecma167-udf.h */
+
+/* Volume recognition sequence ECMA 167 rev. 3 16.1 */
+struct vrs_desc {
+ uint8_t struct_type;
+ uint8_t identifier[5];
+ uint8_t version;
+ uint8_t data[2041];
+} __packed;
+
+
+#define VRS_NSR02 "NSR02"
+#define VRS_NSR03 "NSR03"
+#define VRS_BEA01 "BEA01"
+#define VRS_TEA01 "TEA01"
+#define VRS_CD001 "CD001"
+#define VRS_CDW02 "CDW02"
+
+
+/* Structure/definitions/constants a la ECMA 167 rev. 3 */
+
+
+#define MAX_TAGID_VOLUMES 9
+/* Tag identifiers */
+enum {
+ TAGID_SPARING_TABLE = 0,
+ TAGID_PRI_VOL = 1,
+ TAGID_ANCHOR = 2,
+ TAGID_VOL = 3,
+ TAGID_IMP_VOL = 4,
+ TAGID_PARTITION = 5,
+ TAGID_LOGVOL = 6,
+ TAGID_UNALLOC_SPACE = 7,
+ TAGID_TERM = 8,
+ TAGID_LOGVOL_INTEGRITY= 9,
+ TAGID_FSD = 256,
+ TAGID_FID = 257,
+ TAGID_ALLOCEXTENT = 258,
+ TAGID_INDIRECTENTRY = 259,
+ TAGID_ICB_TERM = 260,
+ TAGID_FENTRY = 261,
+ TAGID_EXTATTR_HDR = 262,
+ TAGID_UNALL_SP_ENTRY = 263,
+ TAGID_SPACE_BITMAP = 264,
+ TAGID_PART_INTEGRITY = 265,
+ TAGID_EXTFENTRY = 266,
+ TAGID_MAX = 266
+};
+
+
+enum {
+ UDF_DOMAIN_FLAG_HARD_WRITE_PROTECT = 1,
+ UDF_DOMAIN_FLAG_SOFT_WRITE_PROTECT = 2
+};
+
+
+enum {
+ UDF_ACCESSTYPE_NOT_SPECIFIED = 0, /* unknown */
+ UDF_ACCESSTYPE_PSEUDO_OVERWITE = 0, /* pseudo overwritable, e.g. BD-R's LOW */
+ UDF_ACCESSTYPE_READ_ONLY = 1, /* really only readable */
+ UDF_ACCESSTYPE_WRITE_ONCE = 2, /* write once and you're done */
+ UDF_ACCESSTYPE_REWRITEABLE = 3, /* may need extra work to rewrite */
+ UDF_ACCESSTYPE_OVERWRITABLE = 4 /* no limits on rewriting; e.g. harddisc*/
+};
+
+
+/* Descriptor tag [3/7.2] */
+struct desc_tag {
+ uint16_t id;
+ uint16_t descriptor_ver;
+ uint8_t cksum;
+ uint8_t reserved;
+ uint16_t serial_num;
+ uint16_t desc_crc;
+ uint16_t desc_crc_len;
+ uint32_t tag_loc;
+} __packed;
+#define UDF_DESC_TAG_LENGTH 16
+
+
+/* Recorded Address [4/7.1] */
+struct lb_addr { /* within partition space */
+ uint32_t lb_num;
+ uint16_t part_num;
+} __packed;
+
+
+/* Extent Descriptor [3/7.1] */
+struct extent_ad {
+ uint32_t len;
+ uint32_t loc;
+} __packed;
+
+
+/* Short Allocation Descriptor [4/14.14.1] */
+struct short_ad {
+ uint32_t len;
+ uint32_t lb_num;
+} __packed;
+
+
+/* Long Allocation Descriptor [4/14.14.2] */
+struct UDF_ADImp_use {
+ uint16_t flags;
+ uint32_t unique_id;
+} __packed;
+#define UDF_ADIMP_FLAGS_EXTENT_ERASED 1
+
+
+struct long_ad {
+ uint32_t len;
+ struct lb_addr loc; /* within a logical volume mapped partition space !! */
+ union {
+ uint8_t bytes[6];
+ struct UDF_ADImp_use im_used;
+ } impl;
+} __packed;
+#define longad_uniqueid impl.im_used.unique_id
+
+
+/* Extended Allocation Descriptor [4/14.14.3] ; identifies an extent of allocation descriptors ; also in UDF ? */
+struct ext_ad {
+ uint32_t ex_len;
+ uint32_t rec_len;
+ uint32_t inf_len;
+ struct lb_addr ex_loc;
+ uint8_t reserved[2];
+} __packed;
+
+
+/* ICB : Information Control Block; positioning */
+union icb {
+ struct short_ad s_ad;
+ struct long_ad l_ad;
+ struct ext_ad e_ad;
+};
+
+
+/* short/long/ext extent have flags encoded in length */
+#define UDF_EXT_ALLOCATED (0<<30)
+#define UDF_EXT_FREED (1<<30)
+#define UDF_EXT_ALLOCATED_BUT_NOT_USED (1<<30)
+#define UDF_EXT_FREE (2<<30)
+#define UDF_EXT_REDIRECT (3<<30)
+#define UDF_EXT_FLAGS(len) ((len) & (3<<30))
+#define UDF_EXT_LEN(len) ((len) & ((1<<30)-1))
+#define UDF_EXT_MAXLEN ((1<<30)-1)
+
+
+/* Character set spec [1/7.2.1] */
+struct charspec {
+ uint8_t type;
+ uint8_t inf[63];
+} __packed;
+
+
+struct pathcomp {
+ uint8_t type;
+ uint8_t l_ci;
+ uint16_t comp_filever;
+ uint8_t ident[256];
+} __packed;
+#define UDF_PATH_COMP_SIZE 4
+#define UDF_PATH_COMP_RESERVED 0
+#define UDF_PATH_COMP_ROOT 1
+#define UDF_PATH_COMP_MOUNTROOT 2
+#define UDF_PATH_COMP_PARENTDIR 3
+#define UDF_PATH_COMP_CURDIR 4
+#define UDF_PATH_COMP_NAME 5
+
+
+/* Timestamp [1/7.3] */
+struct timestamp {
+ uint16_t type_tz;
+ uint16_t year;
+ uint8_t month;
+ uint8_t day;
+ uint8_t hour;
+ uint8_t minute;
+ uint8_t second;
+ uint8_t centisec;
+ uint8_t hund_usec;
+ uint8_t usec;
+} __packed;
+#define UDF_TIMESTAMP_SIZE 12
+
+
+/* Entity Identifier [1/7.4] */
+#define UDF_REGID_ID_SIZE 23
+struct regid {
+ uint8_t flags;
+ uint8_t id[UDF_REGID_ID_SIZE];
+ uint8_t id_suffix[8];
+} __packed;
+
+
+/* ICB Tag [4/14.6] */
+struct icb_tag {
+ uint32_t prev_num_dirs;
+ uint16_t strat_type;
+ uint8_t strat_param[2];
+ uint16_t max_num_entries;
+ uint8_t reserved;
+ uint8_t file_type;
+ struct lb_addr parent_icb;
+ uint16_t flags;
+} __packed;
+#define UDF_ICB_TAG_FLAGS_ALLOC_MASK 0x03
+#define UDF_ICB_SHORT_ALLOC 0x00
+#define UDF_ICB_LONG_ALLOC 0x01
+#define UDF_ICB_EXT_ALLOC 0x02
+#define UDF_ICB_INTERN_ALLOC 0x03
+
+#define UDF_ICB_TAG_FLAGS_DIRORDERED (1<< 3)
+#define UDF_ICB_TAG_FLAGS_NONRELOC (1<< 4)
+#define UDF_ICB_TAG_FLAGS_CONTIGUES (1<< 9)
+#define UDF_ICB_TAG_FLAGS_MULTIPLEVERS (1<<12)
+
+#define UDF_ICB_TAG_FLAGS_SETUID (1<< 6)
+#define UDF_ICB_TAG_FLAGS_SETGID (1<< 7)
+#define UDF_ICB_TAG_FLAGS_STICKY (1<< 8)
+
+#define UDF_ICB_FILETYPE_UNKNOWN 0
+#define UDF_ICB_FILETYPE_UNALLOCSPACE 1
+#define UDF_ICB_FILETYPE_PARTINTEGRITY 2
+#define UDF_ICB_FILETYPE_INDIRECTENTRY 3
+#define UDF_ICB_FILETYPE_DIRECTORY 4
+#define UDF_ICB_FILETYPE_RANDOMACCESS 5
+#define UDF_ICB_FILETYPE_BLOCKDEVICE 6
+#define UDF_ICB_FILETYPE_CHARDEVICE 7
+#define UDF_ICB_FILETYPE_EXTATTRREC 8
+#define UDF_ICB_FILETYPE_FIFO 9
+#define UDF_ICB_FILETYPE_SOCKET 10
+#define UDF_ICB_FILETYPE_TERM 11
+#define UDF_ICB_FILETYPE_SYMLINK 12
+#define UDF_ICB_FILETYPE_STREAMDIR 13
+#define UDF_ICB_FILETYPE_VAT 248
+#define UDF_ICB_FILETYPE_REALTIME 249
+#define UDF_ICB_FILETYPE_META_MAIN 250
+#define UDF_ICB_FILETYPE_META_MIRROR 251
+#define UDF_ICB_FILETYPE_META_BITMAP 252
+
+
+/* Anchor Volume Descriptor Pointer [3/10.2] */
+struct anchor_vdp {
+ struct desc_tag tag;
+ struct extent_ad main_vds_ex; /* to main volume descriptor set ; 16 sectors min */
+ struct extent_ad reserve_vds_ex; /* copy of main volume descriptor set ; 16 sectors min */
+} __packed;
+
+
+/* Volume Descriptor Pointer [3/10.3] */
+struct vol_desc_ptr {
+ struct desc_tag tag; /* use for extending the volume descriptor space */
+ uint32_t vds_number;
+ struct extent_ad next_vds_ex; /* points to the next block for volume descriptor space */
+} __packed;
+
+
+/* Primary Volume Descriptor [3/10.1] */
+struct pri_vol_desc {
+ struct desc_tag tag;
+ uint32_t seq_num; /* MAX prevail */
+ uint32_t pvd_num; /* assigned by author; 0 is special as in it may only occur once */
+ char vol_id[32]; /* KEY ; main identifier of this disc */
+ uint16_t vds_num; /* volume descriptor number; i.e. what volume number is it */
+ uint16_t max_vol_seq; /* maximum volume descriptor number known */
+ uint16_t ichg_lvl;
+ uint16_t max_ichg_lvl;
+ uint32_t charset_list;
+ uint32_t max_charset_list;
+ char volset_id[128]; /* KEY ; if part of a multi-disc set or a band of volumes */
+ struct charspec desc_charset; /* KEY according to ECMA 167 */
+ struct charspec explanatory_charset;
+ struct extent_ad vol_abstract;
+ struct extent_ad vol_copyright;
+ struct regid app_id;
+ struct timestamp time;
+ struct regid imp_id;
+ uint8_t imp_use[64];
+ uint32_t prev_vds_loc; /* location of predecessor _lov ? */
+ uint16_t flags; /* bit 0 : if set indicates volume set name is meaningful */
+ uint8_t reserved[22];
+} __packed;
+
+
+/* UDF specific implementation use part of the implementation use volume descriptor */
+struct udf_lv_info {
+ struct charspec lvi_charset;
+ char logvol_id[128];
+
+ char lvinfo1[36];
+ char lvinfo2[36];
+ char lvinfo3[36];
+
+ struct regid impl_id;
+ uint8_t impl_use[128];
+} __packed;
+
+
+/* Implementation use Volume Descriptor */
+struct impvol_desc {
+ struct desc_tag tag;
+ uint32_t seq_num;
+ struct regid impl_id;
+ union {
+ struct udf_lv_info lv_info;
+ char impl_use[460];
+ } _impl_use;
+} __packed;
+
+
+/* Logical Volume Descriptor [3/10.6] */
+struct logvol_desc {
+ struct desc_tag tag;
+ uint32_t seq_num; /* MAX prevail */
+ struct charspec desc_charset; /* KEY */
+ char logvol_id[128]; /* KEY */
+ uint32_t lb_size;
+ struct regid domain_id;
+ union {
+ struct long_ad fsd_loc; /* to fileset descriptor SEQUENCE */
+ uint8_t logvol_content_use[16];
+ } _lvd_use;
+ uint32_t mt_l; /* Partition map length */
+ uint32_t n_pm; /* Number of partition maps */
+ struct regid imp_id;
+ uint8_t imp_use[128];
+ struct extent_ad integrity_seq_loc;
+ uint8_t maps[1];
+} __packed;
+#define lv_fsd_loc _lvd_use.fsd_loc
+
+#define UDF_INTEGRITY_OPEN 0
+#define UDF_INTEGRITY_CLOSED 1
+
+
+#define UDF_PMAP_SIZE 64
+
+/* Type 1 Partition Map [3/10.7.2] */
+struct part_map_1 {
+ uint8_t type;
+ uint8_t len;
+ uint16_t vol_seq_num;
+ uint16_t part_num;
+} __packed;
+
+
+/* Type 2 Partition Map [3/10.7.3] */
+struct part_map_2 {
+ uint8_t type;
+ uint8_t len;
+ uint8_t reserved[2];
+ struct regid part_id;
+ uint16_t vol_seq_num;
+ uint16_t part_num;
+ uint8_t reserved2[24];
+} __packed;
+
+
+/* Virtual Partition Map [UDF 2.01/2.2.8] */
+struct part_map_virt {
+ uint8_t type;
+ uint8_t len;
+ uint8_t reserved[2];
+ struct regid id;
+ uint16_t vol_seq_num;
+ uint16_t part_num;
+ uint8_t reserved1[24];
+} __packed;
+
+
+/* Sparable Partition Map [UDF 2.01/2.2.9] */
+struct part_map_spare {
+ uint8_t type;
+ uint8_t len;
+ uint8_t reserved[2];
+ struct regid id;
+ uint16_t vol_seq_num;
+ uint16_t part_num;
+ uint16_t packet_len;
+ uint8_t n_st; /* Number of redundant sparing tables range 1-4 */
+ uint8_t reserved1;
+ uint32_t st_size; /* size of EACH sparing table */
+ uint32_t st_loc[1]; /* locations of sparing tables */
+} __packed;
+
+
+/* Metadata Partition Map [UDF 2.50/2.2.10] */
+struct part_map_meta {
+ uint8_t type;
+ uint8_t len;
+ uint8_t reserved[2];
+ struct regid id;
+ uint16_t vol_seq_num;
+ uint16_t part_num;
+ uint32_t meta_file_lbn; /* logical block number for file entry within part_num */
+ uint32_t meta_mirror_file_lbn;
+ uint32_t meta_bitmap_file_lbn;
+ uint32_t alloc_unit_size; /* allocation unit size in blocks */
+ uint16_t alignment_unit_size; /* alignment necessary in blocks */
+ uint8_t flags;
+ uint8_t reserved1[5];
+} __packed;
+#define METADATA_DUPLICATED 1
+
+
+union udf_pmap {
+ uint8_t data[UDF_PMAP_SIZE];
+ struct part_map_1 pm1;
+ struct part_map_2 pm2;
+ struct part_map_virt pmv;
+ struct part_map_spare pms;
+ struct part_map_meta pmm;
+};
+
+
+/* Sparing Map Entry [UDF 2.01/2.2.11] */
+struct spare_map_entry {
+ uint32_t org; /* partition relative address */
+ uint32_t map; /* absolute disc address (!) can be in partition, but doesn't have to be */
+} __packed;
+
+
+/* Sparing Table [UDF 2.01/2.2.11] */
+struct udf_sparing_table {
+ struct desc_tag tag;
+ struct regid id;
+ uint16_t rt_l; /* Relocation Table len */
+ uint8_t reserved[2];
+ uint32_t seq_num;
+ struct spare_map_entry entries[1];
+} __packed;
+
+
+#define UDF_NO_PREV_VAT 0xffffffff
+/* UDF 1.50 VAT suffix [UDF 2.2.10 (UDF 1.50 spec)] */
+struct udf_oldvat_tail {
+ struct regid id; /* "*UDF Virtual Alloc Tbl" */
+ uint32_t prev_vat;
+} __packed;
+
+
+/* VAT table [UDF 2.0.1/2.2.10] */
+struct udf_vat {
+ uint16_t header_len;
+ uint16_t impl_use_len;
+ char logvol_id[128]; /* newer version of the LVD one */
+ uint32_t prev_vat;
+ uint32_t num_files;
+ uint32_t num_directories;
+ uint16_t min_udf_readver;
+ uint16_t min_udf_writever;
+ uint16_t max_udf_writever;
+ uint16_t reserved;
+ uint8_t data[1]; /* impl.use followed by VAT entries (uint32_t) */
+} __packed;
+
+
+/* Space bitmap descriptor as found in the partition header descriptor */
+struct space_bitmap_desc {
+ struct desc_tag tag; /* TagId 264 */
+ uint32_t num_bits; /* number of bits */
+ uint32_t num_bytes; /* bytes that contain it */
+ uint8_t data[1];
+} __packed;
+
+
+/* Unalloc space entry as found in the partition header descriptor */
+struct space_entry_desc {
+ struct desc_tag tag; /* TagId 263 */
+ struct icb_tag icbtag; /* type 1 */
+ uint32_t l_ad; /* in bytes */
+ uint8_t entry[1];
+} __packed;
+
+
+/* Partition header descriptor; in the contents_use of part_desc */
+struct part_hdr_desc {
+ struct short_ad unalloc_space_table;
+ struct short_ad unalloc_space_bitmap;
+ struct short_ad part_integrity_table; /* has to be ZERO for UDF */
+ struct short_ad freed_space_table;
+ struct short_ad freed_space_bitmap;
+ uint8_t reserved[88];
+} __packed;
+
+
+/* Partition Descriptor [3/10.5] */
+struct part_desc {
+ struct desc_tag tag;
+ uint32_t seq_num; /* MAX prevailing */
+ uint16_t flags; /* bit 0 : if set the space is allocated */
+ uint16_t part_num; /* KEY */
+ struct regid contents;
+ union {
+ struct part_hdr_desc part_hdr;
+ uint8_t contents_use[128];
+ } _impl_use;
+ uint32_t access_type; /* R/W, WORM etc. */
+ uint32_t start_loc; /* start of partition with given length */
+ uint32_t part_len;
+ struct regid imp_id;
+ uint8_t imp_use[128];
+ uint8_t reserved[156];
+} __packed;
+#define pd_part_hdr _impl_use.part_hdr
+#define UDF_PART_FLAG_ALLOCATED 1
+
+
+/* Unallocated Space Descriptor (UDF 2.01/2.2.5) */
+struct unalloc_sp_desc {
+ struct desc_tag tag;
+ uint32_t seq_num; /* MAX prevailing */
+ uint32_t alloc_desc_num;
+ struct extent_ad alloc_desc[1];
+} __packed;
+
+
+/* Logical Volume Integrity Descriptor [3/30.10] */
+struct logvolhdr {
+ uint64_t next_unique_id;
+ /* rest reserved */
+} __packed;
+
+
+struct udf_logvol_info {
+ struct regid impl_id;
+ uint32_t num_files;
+ uint32_t num_directories;
+ uint16_t min_udf_readver;
+ uint16_t min_udf_writever;
+ uint16_t max_udf_writever;
+} __packed;
+
+
+struct logvol_int_desc {
+ struct desc_tag tag;
+ struct timestamp time;
+ uint32_t integrity_type;
+ struct extent_ad next_extent;
+ union {
+ struct logvolhdr logvolhdr;
+ int8_t reserved[32];
+ } _impl_use;
+ uint32_t num_part;
+ uint32_t l_iu;
+ uint32_t tables[1]; /* Freespace table, Sizetable, Implementation use */
+} __packed;
+#define lvint_next_unique_id _impl_use.logvolhdr.next_unique_id
+
+
+/* File Set Descriptor [4/14.1] */
+struct fileset_desc {
+ struct desc_tag tag;
+ struct timestamp time;
+ uint16_t ichg_lvl;
+ uint16_t max_ichg_lvl;
+ uint32_t charset_list;
+ uint32_t max_charset_list;
+ uint32_t fileset_num; /* key! */
+ uint32_t fileset_desc_num;
+ struct charspec logvol_id_charset;
+ char logvol_id[128]; /* for recovery */
+ struct charspec fileset_charset;
+ char fileset_id[32]; /* Mountpoint !! */
+ char copyright_file_id[32];
+ char abstract_file_id[32];
+ struct long_ad rootdir_icb; /* to rootdir; icb->virtual ? */
+ struct regid domain_id;
+ struct long_ad next_ex; /* to the next fileset_desc extent */
+ struct long_ad streamdir_icb; /* streamdir; needed? */
+ uint8_t reserved[32];
+} __packed;
+
+
+/* File Identifier Descriptor [4/14.4] */
+struct fileid_desc {
+ struct desc_tag tag;
+ uint16_t file_version_num;
+ uint8_t file_char;
+ uint8_t l_fi; /* Length of file identifier area */
+ struct long_ad icb;
+ uint16_t l_iu; /* Length of implementation use area */
+ uint8_t data[0];
+} __packed;
+#define UDF_FID_SIZE 38
+#define UDF_FILE_CHAR_VIS (1 << 0) /* Invisible */
+#define UDF_FILE_CHAR_DIR (1 << 1) /* Directory */
+#define UDF_FILE_CHAR_DEL (1 << 2) /* Deleted */
+#define UDF_FILE_CHAR_PAR (1 << 3) /* Parent Directory */
+#define UDF_FILE_CHAR_META (1 << 4) /* Stream metadata */
+
+
+/* Extended attributes [4/14.10.1] */
+struct extattrhdr_desc {
+ struct desc_tag tag;
+ uint32_t impl_attr_loc; /* offsets within this descriptor */
+ uint32_t appl_attr_loc; /* ditto */
+} __packed;
+#define UDF_IMPL_ATTR_LOC_NOT_PRESENT 0xffffffff
+#define UDF_APPL_ATTR_LOC_NOT_PRESENT 0xffffffff
+
+
+/* Extended attribute entry [4/48.10.2] */
+struct extattr_entry {
+ uint32_t type;
+ uint8_t subtype;
+ uint8_t reserved[3];
+ uint32_t a_l;
+} __packed;
+
+
+/* Extended attribute entry; type 2048 [4/48.10.8] */
+struct impl_extattr_entry {
+ struct extattr_entry hdr;
+ uint32_t iu_l;
+ struct regid imp_id;
+ uint8_t data[1];
+} __packed;
+
+
+/* Extended attribute entry; type 65 536 [4/48.10.9] */
+struct appl_extattr_entry {
+ struct extattr_entry hdr;
+ uint32_t au_l;
+ struct regid appl_id;
+ uint8_t data[1];
+} __packed;
+
+
+/* File Times attribute entry; type 5 or type 6 [4/48.10.5], [4/48.10.6] */
+struct filetimes_extattr_entry {
+ struct extattr_entry hdr;
+ uint32_t d_l; /* length of times[] data following */
+ uint32_t existence; /* bitmask */
+ struct timestamp times[1]; /* in order of ascending bits */
+} __packed;
+#define UDF_FILETIMES_ATTR_NO 5
+#define UDF_FILETIMES_FILE_CREATION 1
+#define UDF_FILETIMES_FILE_DELETION 4
+#define UDF_FILETIMES_FILE_EFFECTIVE 8
+#define UDF_FILETIMES_FILE_BACKUPED 16
+#define UDF_FILETIMES_ATTR_SIZE(no) (20 + (no)*sizeof(struct timestamp))
+
+
+/* Device Specification Extended Attribute [4/4.10.7] */
+struct device_extattr_entry {
+ struct extattr_entry hdr;
+ uint32_t iu_l; /* length of implementation use */
+ uint32_t major;
+ uint32_t minor;
+ uint8_t data[1]; /* UDF: if nonzero length, contain developer ID regid */
+} __packed;
+#define UDF_DEVICESPEC_ATTR_NO 12
+
+
+/* VAT LV extension Extended Attribute [UDF 3.3.4.5.1.3] 1.50 errata */
+struct vatlvext_extattr_entry {
+ uint64_t unique_id_chk; /* needs to be copy of ICB's */
+ uint32_t num_files;
+ uint32_t num_directories;
+ char logvol_id[128]; /* replaces logvol name */
+} __packed;
+
+
+/* File Entry [4/14.9] */
+struct file_entry {
+ struct desc_tag tag;
+ struct icb_tag icbtag;
+ uint32_t uid;
+ uint32_t gid;
+ uint32_t perm;
+ uint16_t link_cnt;
+ uint8_t rec_format;
+ uint8_t rec_disp_attr;
+ uint32_t rec_len;
+ uint64_t inf_len;
+ uint64_t logblks_rec;
+ struct timestamp atime;
+ struct timestamp mtime;
+ struct timestamp attrtime;
+ uint32_t ckpoint;
+ struct long_ad ex_attr_icb;
+ struct regid imp_id;
+ uint64_t unique_id;
+ uint32_t l_ea; /* Length of extended attribute area */
+ uint32_t l_ad; /* Length of allocation descriptors */
+ uint8_t data[1];
+} __packed;
+#define UDF_FENTRY_SIZE 176
+#define UDF_FENTRY_PERM_USER_MASK 0x07
+#define UDF_FENTRY_PERM_GRP_MASK 0xE0
+#define UDF_FENTRY_PERM_OWNER_MASK 0x1C00
+
+
+/* Extended File Entry [4/48.17] */
+struct extfile_entry {
+ struct desc_tag tag;
+ struct icb_tag icbtag;
+ uint32_t uid;
+ uint32_t gid;
+ uint32_t perm;
+ uint16_t link_cnt;
+ uint8_t rec_format;
+ uint8_t rec_disp_attr;
+ uint32_t rec_len;
+ uint64_t inf_len;
+ uint64_t obj_size;
+ uint64_t logblks_rec;
+ struct timestamp atime;
+ struct timestamp mtime;
+ struct timestamp ctime;
+ struct timestamp attrtime;
+ uint32_t ckpoint;
+ uint32_t reserved1;
+ struct long_ad ex_attr_icb;
+ struct long_ad streamdir_icb;
+ struct regid imp_id;
+ uint64_t unique_id;
+ uint32_t l_ea; /* Length of extended attribute area */
+ uint32_t l_ad; /* Length of allocation descriptors */
+ uint8_t data[1];
+} __packed;
+#define UDF_EXTFENTRY_SIZE 216
+
+
+/* Indirect entry [ecma 48.7] */
+struct indirect_entry {
+ struct desc_tag tag;
+ struct icb_tag icbtag;
+ struct long_ad indirect_icb;
+} __packed;
+
+
+/* Allocation extent descriptor [ecma 48.5] */
+struct alloc_ext_entry {
+ struct desc_tag tag;
+ uint32_t prev_entry;
+ uint32_t l_ad;
+ uint8_t data[1];
+} __packed;
+
+
+union dscrptr {
+ struct desc_tag tag;
+ struct anchor_vdp avdp;
+ struct vol_desc_ptr vdp;
+ struct pri_vol_desc pvd;
+ struct logvol_desc lvd;
+ struct unalloc_sp_desc usd;
+ struct logvol_int_desc lvid;
+ struct impvol_desc ivd;
+ struct part_desc pd;
+ struct fileset_desc fsd;
+ struct fileid_desc fid;
+ struct file_entry fe;
+ struct extfile_entry efe;
+ struct extattrhdr_desc eahd;
+ struct indirect_entry inde;
+ struct alloc_ext_entry aee;
+ struct udf_sparing_table spt;
+ struct space_bitmap_desc sbd;
+ struct space_entry_desc sed;
+};
+
+
+#endif /* !_FS_UDF_ECMA167_UDF_H_ */
+
Index: sys/fs/udf2/udf.h
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf.h
@@ -0,0 +1,440 @@
+/*-
+ * Copyright (c) 2006, 2008 Reinoud Zandijk
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _FS_UDF_UDF_H_
+#define _FS_UDF_UDF_H_
+
+#include "udf_osta.h"
+#include "udf_mount.h"
+#include "udfio.h"
+
+/* lets see debug stuff for now */
+#define DEBUG
+
+/* debug section */
+extern int udf_verbose;
+
+/* undefine UDF_COMPLETE_DELETE to need `purge'; but purge is not implemented */
+#define UDF_COMPLETE_DELETE
+
+/* debug categories */
+#define UDF_DEBUG_VOLUMES 0x0000001
+#define UDF_DEBUG_LOCKING 0x0000002
+#define UDF_DEBUG_NODE 0x0000004
+#define UDF_DEBUG_LOOKUP 0x0000008
+#define UDF_DEBUG_READDIR 0x0000010
+#define UDF_DEBUG_FIDS 0x0000020
+#define UDF_DEBUG_DESCRIPTOR 0x0000040
+#define UDF_DEBUG_TRANSLATE 0x0000080
+#define UDF_DEBUG_STRATEGY 0x0000100
+#define UDF_DEBUG_READ 0x0000200
+#define UDF_DEBUG_WRITE 0x0000400
+#define UDF_DEBUG_CALL 0x0000800
+#define UDF_DEBUG_ATTR 0x0001000
+#define UDF_DEBUG_EXTATTR 0x0002000
+#define UDF_DEBUG_ALLOC 0x0004000
+#define UDF_DEBUG_ADWLK 0x0008000
+#define UDF_DEBUG_DIRHASH 0x0010000
+#define UDF_DEBUG_NOTIMPL 0x0020000
+#define UDF_DEBUG_SHEDULE 0x0040000
+#define UDF_DEBUG_ECCLINE 0x0080000
+#define UDF_DEBUG_SYNC 0x0100000
+#define UDF_DEBUG_PARANOIA 0x0200000
+#define UDF_DEBUG_PARANOIDADWLK 0x0400000
+#define UDF_DEBUG_NODEDUMP 0x0800000
+#define UDF_DEBUG_RESERVE 0x1000000
+
+/* initial value of udf_verbose */
+#define UDF_DEBUGGING 0
+
+#ifdef DEBUG
+#define DPRINTF(name, arg) { \
+ if (udf_verbose & UDF_DEBUG_##name) {\
+ printf arg;\
+ };\
+ }
+#define DPRINTFIF(name, cond, arg) { \
+ if (udf_verbose & UDF_DEBUG_##name) { \
+ if (cond) printf arg;\
+ };\
+ }
+#else
+#define DPRINTF(name, arg) {}
+#define DPRINTFIF(name, cond, arg) {}
+#endif
+
+/* constants to identify what kind of identifier we are dealing with */
+#define UDF_REGID_DOMAIN 1
+#define UDF_REGID_UDF 2
+#define UDF_REGID_IMPLEMENTATION 3
+#define UDF_REGID_APPLICATION 4
+#define UDF_REGID_NAME 99
+
+
+/* DON'T change these: they identify 13thmonkey's UDF implementation */
+#define APP_NAME "*NetBSD UDF"
+#define APP_VERSION_MAIN 0
+#define APP_VERSION_SUB 5
+#define IMPL_NAME "*NetBSD kernel UDF"
+
+
+/* Configuration values */
+#if 0
+#define UDF_ECCBUF_HASHBITS 10
+#define UDF_ECCBUF_HASHSIZE (1<<UDF_ECCBUF_HASHBITS)
+#define UDF_ECCBUF_HASHMASK (UDF_ECCBUF_HASHSIZE -1)
+
+#define UDF_ECCLINE_MAXFREE 5 /* picked, needs calculation */
+#define UDF_ECCLINE_MAXBUSY 100 /* picked, needs calculation */
+
+#define UDF_MAX_MAPPINGS (MAXPHYS/DEV_BSIZE) /* 128 */
+#endif
+#define UDF_VAT_CHUNKSIZE (64*1024) /* picked */
+#define UDF_SYMLINKBUFLEN (64*1024) /* picked */
+
+#define UDF_DISC_SLACK (128) /* picked, at least 64 kb or 128 */
+#define UDF_ISO_VRS_SIZE (32*2048) /* 32 ISO `sectors' */
+
+
+/* structure space */
+#define UDF_ANCHORS 4 /* 256, 512, N-256, N */
+#define UDF_PARTITIONS 4 /* overkill */
+#define UDF_PMAPS 5 /* overkill */
+#define UDF_LVDINT_SEGMENTS 100 /* big overkill */
+#define UDF_LVINT_LOSSAGE 4 /* lose 2 openings */
+#define UDF_MAX_ALLOC_EXTENTS 50 /* overkill */
+
+
+/* constants */
+#define UDF_MAX_NAMELEN 255 /* as per SPEC */
+#define UDF_TRANS_ZERO ((uint64_t) -1)
+#define UDF_TRANS_UNMAPPED ((uint64_t) -2)
+#define UDF_TRANS_INTERN ((uint64_t) -3)
+#define UDF_MAX_SECTOR ((uint64_t) -10) /* high water mark */
+
+
+/* RW content hint for allocation and other purposes */
+#define UDF_C_ABSOLUTE 0 /* blob to write at absolute */
+#define UDF_C_PROCESSED 0 /* not relevant */
+#define UDF_C_USERDATA 1 /* all but userdata is metadata */
+#define UDF_C_DSCR 2 /* update sectornr and CRC */
+#define UDF_C_FLOAT_DSCR 3 /* update sectornr and CRC; sequential */
+#define UDF_C_NODE 4 /* file/dir node, update sectornr and CRC */
+#define UDF_C_FIDS 5 /* update all contained fids */
+#define UDF_C_METADATA_SBM 6 /* space bitmap, update sectornr and CRC */
+#define UDF_C_EXTATTRS 7 /* dunno what to do yet */
+
+/* use unused b_freelistindex for our UDF_C_TYPE */
+#define b_udf_c_type b_freelistindex
+
+
+/* virtual to physical mapping types */
+#define UDF_VTOP_RAWPART UDF_PMAPS /* [0..UDF_PMAPS> are normal */
+
+#define UDF_VTOP_TYPE_RAW 0
+#define UDF_VTOP_TYPE_UNKNOWN 0
+#define UDF_VTOP_TYPE_PHYS 1
+#define UDF_VTOP_TYPE_VIRT 2
+#define UDF_VTOP_TYPE_SPARABLE 3
+#define UDF_VTOP_TYPE_META 4
+
+
+/* allocation strategies */
+#define UDF_ALLOC_INVALID 0
+#define UDF_ALLOC_SEQUENTIAL 1 /* linear on NWA */
+#define UDF_ALLOC_VAT 2 /* VAT handling */
+#define UDF_ALLOC_SPACEMAP 3 /* spacemaps */
+#define UDF_ALLOC_METABITMAP 4 /* metadata bitmap */
+#define UDF_ALLOC_METASEQUENTIAL 5 /* in chunks seq., nodes not seq */
+#define UDF_ALLOC_RELAXEDSEQUENTIAL 6 /* only nodes not seq. */
+
+
+/* logical volume open/close actions */
+#define UDF_OPEN_SESSION 0x01 /* if needed writeout VRS + VDS */
+#define UDF_CLOSE_SESSION 0x02 /* close session after writing VAT */
+#define UDF_FINALISE_DISC 0x04 /* close session after writing VAT */
+#define UDF_WRITE_VAT 0x08 /* sequential VAT filesystem */
+#define UDF_WRITE_LVINT 0x10 /* write out open lvint */
+#define UDF_WRITE_PART_BITMAPS 0x20 /* write out partition space bitmaps */
+#define UDF_APPENDONLY_LVINT 0x40 /* no shifting, only appending */
+#define UDF_WRITE_METAPART_NODES 0x80 /* write out metadata partition nodes*/
+#define UDFLOGVOL_BITS "\20\1OPEN_SESSION\2CLOSE_SESSION\3FINALISE_DISC" \
+ "\4WRITE_VAT\5WRITE_LVINT\6WRITE_PART_BITMAPS" \
+ "\7APPENDONLY_LVINT\10WRITE_METAPART_NODES"
+
+/* logical volume error handling actions */
+#define UDF_UPDATE_TRACKINFO 0x01 /* update trackinfo and re-schedule */
+#define UDF_REMAP_BLOCK 0x02 /* remap the failing block length */
+#define UDFONERROR_BITS "\20\1UPDATE_TRACKINFO\2REMAP_BLOCK"
+
+
+/* readdir cookies */
+#define UDF_DIRCOOKIE_DOT 1
+
+
+/* malloc pools */
+MALLOC_DECLARE(M_UDFTEMP);
+
+//struct pool udf_node_pool;
+struct udf_node;
+struct udf_strategy;
+
+
+struct udf_lvintq {
+ uint32_t start;
+ uint32_t end;
+ uint32_t pos;
+ uint32_t wpos;
+};
+
+
+struct udf_bitmap {
+ uint8_t *blob; /* allocated */
+ uint8_t *bits; /* bits themselves */
+ uint8_t *pages; /* dirty pages */
+ uint32_t max_offset; /* in bits */
+ uint32_t data_pos; /* position in data */
+ uint32_t metadata_pos; /* .. in metadata */
+};
+
+
+struct udf_strat_args {
+ struct udf_mount *ump;
+ struct udf_node *udf_node;
+ struct long_ad *icb;
+ union dscrptr *dscr;
+ struct buf *nestbuf;
+/* kauth_cred_t cred; */ /* Not ever used? */
+ int waitfor;
+};
+
+struct udf_strategy {
+ int (*create_logvol_dscr) (struct udf_strat_args *args);
+ void (*free_logvol_dscr) (struct udf_strat_args *args);
+ int (*read_logvol_dscr) (struct udf_strat_args *args);
+ int (*write_logvol_dscr) (struct udf_strat_args *args);
+ void (*queuebuf) (struct udf_strat_args *args);
+ void (*discstrat_init) (struct udf_strat_args *args);
+ void (*discstrat_finish) (struct udf_strat_args *args);
+};
+
+//extern struct udf_strategy udf_strat_bootstrap;
+//extern struct udf_strategy udf_strat_sequential;
+//extern struct udf_strategy udf_strat_direct;
+//extern struct udf_strategy udf_strat_rmw;
+extern struct udf_strategy udf_strat_readonly;
+
+
+/* pre cleanup */
+struct udf_mount {
+ struct mount *vfs_mountp;
+ struct vnode *devvp;
+ struct cdev *dev;
+ struct g_consumer *geomcp;
+ struct bufobj *bo;
+ struct mmc_discinfo discinfo;
+ struct udf_args mount_args;
+ int flags;
+
+ /* iconv */
+ void *iconv_d2l; /* disk to local */
+#if 0
+ void *iconv_l2d; /* local to disk */
+#endif
+
+ /* format descriptors */
+/* kmutex_t logvol_mutex; */ /* Who needs locks... */
+ struct anchor_vdp *anchors[UDF_ANCHORS]; /* anchors to VDS */
+ struct pri_vol_desc *primary_vol; /* identification */
+ struct logvol_desc *logical_vol; /* main mapping v->p */
+ struct unalloc_sp_desc *unallocated; /* free UDF space */
+ struct impvol_desc *implementation; /* likely redundant */
+ struct logvol_int_desc *logvol_integrity; /* current integrity */
+ struct part_desc *partitions[UDF_PARTITIONS]; /* partitions */
+ /* logvol_info is derived; points *into* other structures */
+ struct udf_logvol_info *logvol_info; /* integrity descr. */
+
+ /* fileset and root directories */
+ struct fileset_desc *fileset_desc; /* normally one */
+
+ /* tracing logvol integrity history */
+ struct udf_lvintq lvint_trace[UDF_LVDINT_SEGMENTS];
+ int lvopen; /* logvol actions */
+ int lvclose; /* logvol actions */
+
+ /* logical to physical translations */
+ int vtop[UDF_PMAPS+1]; /* vpartnr trans */
+ int vtop_tp[UDF_PMAPS+1]; /* type of trans */
+
+ /* disc allocation / writing method */
+/* kmutex_t allocate_mutex; */
+ int lvreadwrite; /* error handling */
+ int vtop_alloc[UDF_PMAPS+1]; /* alloc scheme */
+ int data_part;
+ int node_part;
+ int fids_part;
+
+ /* sequential track info */
+/* struct mmc_trackinfo data_track;
+ struct mmc_trackinfo metadata_track; */
+
+ /* VAT */
+ uint32_t first_possible_vat_location;
+ uint32_t last_possible_vat_location;
+ uint32_t vat_entries;
+ uint32_t vat_offset; /* offset in table */
+ uint32_t vat_last_free_lb; /* last free lb_num */
+ uint32_t vat_table_len;
+ uint32_t vat_table_alloc_len;
+ uint8_t *vat_table;
+ uint8_t *vat_pages; /* TODO */
+ struct udf_node *vat_node; /* system node */
+
+ /* space bitmaps for physical partitions */
+ struct space_bitmap_desc*part_unalloc_dscr[UDF_PARTITIONS];
+ struct space_bitmap_desc*part_freed_dscr [UDF_PARTITIONS];
+ struct udf_bitmap part_unalloc_bits[UDF_PARTITIONS];
+ struct udf_bitmap part_freed_bits [UDF_PARTITIONS];
+
+ /* sparable */
+ uint32_t sparable_packet_size;
+ uint32_t packet_size;
+ struct udf_sparing_table*sparing_table;
+
+ /* meta */
+ struct udf_node *metadata_node; /* system node */
+ struct udf_node *metadatamirror_node; /* system node */
+ struct udf_node *metadatabitmap_node; /* system node */
+ struct space_bitmap_desc*metadata_unalloc_dscr;
+ struct udf_bitmap metadata_unalloc_bits;
+ uint32_t metadata_alloc_unit_size;
+ uint16_t metadata_alignment_unit_size;
+ uint8_t metadata_flags;
+
+ /* rb tree for lookup icb to udf_node and sorted list for sync */
+/* kmutex_t ihash_lock;
+ kmutex_t get_node_lock; */
+/* struct rb_tree udf_node_tree; */
+
+ /* syncing */
+ int syncing; /* are we syncing? */
+/* kcondvar_t dirtynodes_cv; */ /* sleeping on sync */
+
+ /* late allocation */
+ int32_t uncommitted_lbs[UDF_PARTITIONS];
+// struct long_ad *la_node_ad_cpy; /* issue buf */
+// uint64_t *la_lmapping, *la_pmapping; /* issue buf */
+
+ /* lists */
+ STAILQ_HEAD(udfmntpts, udf_mount) all_udf_mntpnts;
+
+ /* device strategy */
+ struct udf_strategy *strategy;
+ void *strategy_private;
+};
+
+#if 0
+#define RBTOUDFNODE(node) \
+ ((node) ? \
+ (void *)((uintptr_t)(node) - offsetof(struct udf_node, rbnode)) \
+ : NULL)
+#endif
+
+/*
+ * UDF node describing a file/directory.
+ *
+ * BUGALERT claim node_mutex before reading/writing to prevent inconsistencies !
+ */
+struct udf_node {
+/* struct genfs_node i_gnode; */ /* has to be first */
+ struct vnode *vnode; /* vnode associated */
+ struct udf_mount *ump;
+
+ ino_t hash_id; /* should contain inode */
+ int diroff; /* used in lookup */
+/* kmutex_t node_mutex;
+ kcondvar_t node_lock; */ /* sleeping lock */
+ char const *lock_fname;
+ int lock_lineno;
+
+ /* rb_node for fast lookup and fast sequential visiting */
+/* struct rb_node rbnode; */
+
+ /* one of `fe' or `efe' can be set, not both (UDF file entry dscr.) */
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct alloc_ext_entry *ext[UDF_MAX_ALLOC_EXTENTS];
+ int num_extensions;
+
+ /* location found, recording location & hints */
+ struct long_ad loc; /* FID/hash loc. */
+ struct long_ad write_loc; /* strat 4096 loc */
+ int needs_indirect; /* has missing indr. */
+ struct long_ad ext_loc[UDF_MAX_ALLOC_EXTENTS];
+
+ struct dirhash *dir_hash;
+
+ /* misc */
+ uint32_t i_flags; /* associated flags */
+ struct lockf *lockf; /* lock list */
+ uint32_t outstanding_bufs; /* file data */
+ uint32_t outstanding_nodedscr; /* node dscr */
+ int32_t uncommitted_lbs; /* in UBC */
+
+ /* references to associated nodes */
+ struct udf_node *extattr;
+ struct udf_node *streamdir;
+ struct udf_node *my_parent; /* if extended attr. */
+};
+
+
+/* misc. flags stored in i_flags (XXX needs cleaning up) */
+#define IN_ACCESS 0x0001 /* Inode access time update request */
+#define IN_CHANGE 0x0002 /* Inode change time update request */
+#define IN_UPDATE 0x0004 /* Inode was written to; update mtime*/
+#define IN_MODIFY 0x0008 /* Modification time update request */
+#define IN_MODIFIED 0x0010 /* node has been modified */
+#define IN_ACCESSED 0x0020 /* node has been accessed */
+#define IN_RENAME 0x0040 /* node is being renamed. XXX ?? */
+#define IN_DELETED 0x0080 /* node is unlinked, no FID reference*/
+#define IN_LOCKED 0x0100 /* node is locked by condvar */
+#define IN_SYNCED 0x0200 /* node is being used by sync */
+#define IN_CALLBACK_ULK 0x0400 /* node will be unlocked by callback */
+#define IN_NODE_REBUILD 0x0800 /* node is being rebuilt */
+
+
+#define IN_FLAGBITS \
+ "\10\1IN_ACCESS\2IN_CHANGE\3IN_UPDATE\4IN_MODIFY\5IN_MODIFIED" \
+ "\6IN_ACCESSED\7IN_RENAME\10IN_DELETED\11IN_LOCKED\12IN_SYNCED" \
+ "\13IN_CALLBACK_ULK\14IN_NODE_REBUILD"
+
+struct udf_fid {
+ u_short len; /* length of data in bytes */
+ u_short padding; /* force longword alignment */
+ ino_t ino;
+};
+
+#endif /* !_FS_UDF_UDF_H_ */
Index: sys/fs/udf2/udf_allocation.c
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf_allocation.c
@@ -0,0 +1,3310 @@
+/*-
+ * Copyright (c) 2012 Oleksandr Dudinskyi
+ * Copyright (c) 2012 Will DeVries
+ * Copyright (c) 2006, 2008 Reinoud Zandijk
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+#include <sys/endian.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/limits.h>
+#include <sys/malloc.h>
+
+#include "ecma167-udf.h"
+#include "udf.h"
+#include "udf_subr.h"
+
+
+#if 0
+static void udf_record_allocation_in_node(struct udf_mount *ump,
+ struct buf *buf, uint16_t vpart_num, uint64_t *mapping,
+ struct long_ad *node_ad_cpy);
+
+static void udf_collect_free_space_for_vpart(struct udf_mount *ump,
+ uint16_t vpart_num, uint32_t num_lb);
+
+static void udf_wipe_adslots(struct udf_node *udf_node);
+static void udf_count_alloc_exts(struct udf_node *udf_node);
+
+/*
+ * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
+ * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
+ * since actions are most likely sequential and thus seeking doesn't need
+ * searching for the same or adjacent position again.
+ */
+
+/* --------------------------------------------------------------------- */
+
+#if 0
+#if 1
+static void
+udf_node_dump(struct udf_node *udf_node) {
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct icb_tag *icbtag;
+ struct long_ad s_ad;
+ uint64_t inflen;
+ uint32_t icbflags, addr_type;
+ uint32_t len, lb_num;
+ uint32_t flags;
+ int part_num;
+ int lb_size, eof, slot;
+
+ if ((udf_verbose & UDF_DEBUG_NODEDUMP) == 0)
+ return;
+
+ lb_size = le32toh(udf_node->ump->logical_vol->lb_size);
+
+ fe = udf_node->fe;
+ efe = udf_node->efe;
+ if (fe) {
+ icbtag = &fe->icbtag;
+ inflen = le64toh(fe->inf_len);
+ } else {
+ icbtag = &efe->icbtag;
+ inflen = le64toh(efe->inf_len);
+ }
+
+ icbflags = le16toh(icbtag->flags);
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ printf("udf_node_dump %p :\n", udf_node);
+
+ if (addr_type == UDF_ICB_INTERN_ALLOC) {
+ printf("\tIntern alloc, len = %"PRIu64"\n", inflen);
+ return;
+ }
+
+ printf("\tInflen = %"PRIu64"\n", inflen);
+ printf("\t\t");
+
+ slot = 0;
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &s_ad, &eof);
+ if (eof)
+ break;
+ part_num = le16toh(s_ad.loc.part_num);
+ lb_num = le32toh(s_ad.loc.lb_num);
+ len = le32toh(s_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ printf("[");
+ if (part_num >= 0)
+ printf("part %d, ", part_num);
+ printf("lb_num %d, len %d", lb_num, len);
+ if (flags)
+ printf(", flags %d", flags>>30);
+ printf("] ");
+
+ if (flags == UDF_EXT_REDIRECT) {
+ printf("\n\textent END\n\tallocation extent\n\t\t");
+ }
+
+ slot++;
+ }
+ printf("\n\tl_ad END\n\n");
+}
+#else
+#define udf_node_dump(a)
+#endif
+
+
+static void
+udf_assert_allocated(struct udf_mount *ump, uint16_t vpart_num,
+ uint32_t lb_num, uint32_t num_lb)
+{
+ struct udf_bitmap *bitmap;
+ struct part_desc *pdesc;
+ uint32_t ptov;
+ uint32_t bitval;
+ uint8_t *bpos;
+ int bit;
+ int phys_part;
+ int ok;
+
+ DPRINTF(PARANOIA, ("udf_assert_allocated: check virt lbnum %d "
+ "part %d + %d sect\n", lb_num, vpart_num, num_lb));
+
+ /* get partition backing up this vpart_num */
+ pdesc = ump->partitions[ump->vtop[vpart_num]];
+
+ switch (ump->vtop_tp[vpart_num]) {
+ case UDF_VTOP_TYPE_PHYS :
+ case UDF_VTOP_TYPE_SPARABLE :
+ /* free space to freed or unallocated space bitmap */
+ ptov = le32toh(pdesc->start_loc);
+ phys_part = ump->vtop[vpart_num];
+
+ /* use unallocated bitmap */
+ bitmap = &ump->part_unalloc_bits[phys_part];
+
+ /* if no bitmaps are defined, bail out */
+ if (bitmap->bits == NULL)
+ break;
+
+ /* check bits */
+ KASSERT(bitmap->bits);
+ ok = 1;
+ bpos = bitmap->bits + lb_num/8;
+ bit = lb_num % 8;
+ while (num_lb > 0) {
+ bitval = (1 << bit);
+ DPRINTF(PARANOIA, ("XXX : check %d, %p, bit %d\n",
+ lb_num, bpos, bit));
+ KASSERT(bitmap->bits + lb_num/8 == bpos);
+ if (*bpos & bitval) {
+ printf("\tlb_num %d is NOT marked busy\n",
+ lb_num);
+ ok = 0;
+ }
+ lb_num++; num_lb--;
+ bit = (bit + 1) % 8;
+ if (bit == 0)
+ bpos++;
+ }
+ if (!ok) {
+ /* KASSERT(0); */
+ }
+
+ break;
+ case UDF_VTOP_TYPE_VIRT :
+ /* TODO check space */
+ KASSERT(num_lb == 1);
+ break;
+ case UDF_VTOP_TYPE_META :
+ /* TODO check space in the metadata bitmap */
+ default:
+ /* not implemented */
+ break;
+ }
+}
+
+
+static void
+udf_node_sanity_check(struct udf_node *udf_node,
+ uint64_t *cnt_inflen, uint64_t *cnt_logblksrec)
+{
+ union dscrptr *dscr;
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct icb_tag *icbtag;
+ struct long_ad s_ad;
+ uint64_t inflen, logblksrec;
+ uint32_t icbflags, addr_type;
+ uint32_t len, lb_num, l_ea, l_ad, max_l_ad;
+ uint16_t part_num;
+ uint8_t *data_pos;
+ int dscr_size, lb_size, flags, whole_lb;
+ int i, slot, eof;
+
+ KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
+
+ if (1)
+ udf_node_dump(udf_node);
+
+ lb_size = le32toh(udf_node->ump->logical_vol->lb_size);
+
+ fe = udf_node->fe;
+ efe = udf_node->efe;
+ if (fe) {
+ dscr = (union dscrptr *) fe;
+ icbtag = &fe->icbtag;
+ inflen = le64toh(fe->inf_len);
+ dscr_size = sizeof(struct file_entry) -1;
+ logblksrec = le64toh(fe->logblks_rec);
+ l_ad = le32toh(fe->l_ad);
+ l_ea = le32toh(fe->l_ea);
+ } else {
+ dscr = (union dscrptr *) efe;
+ icbtag = &efe->icbtag;
+ inflen = le64toh(efe->inf_len);
+ dscr_size = sizeof(struct extfile_entry) -1;
+ logblksrec = le64toh(efe->logblks_rec);
+ l_ad = le32toh(efe->l_ad);
+ l_ea = le32toh(efe->l_ea);
+ }
+ data_pos = (uint8_t *) dscr + dscr_size + l_ea;
+ max_l_ad = lb_size - dscr_size - l_ea;
+ icbflags = le16toh(icbtag->flags);
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ /* check if tail is zero */
+ DPRINTF(PARANOIA, ("Sanity check blank tail\n"));
+ for (i = l_ad; i < max_l_ad; i++) {
+ if (data_pos[i] != 0)
+ printf( "sanity_check: violation: node byte %d "
+ "has value %d\n", i, data_pos[i]);
+ }
+
+ /* reset counters */
+ *cnt_inflen = 0;
+ *cnt_logblksrec = 0;
+
+ if (addr_type == UDF_ICB_INTERN_ALLOC) {
+ KASSERT(l_ad <= max_l_ad);
+ KASSERT(l_ad == inflen);
+ *cnt_inflen = inflen;
+ return;
+ }
+
+ /* start counting */
+ whole_lb = 1;
+ slot = 0;
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &s_ad, &eof);
+ if (eof)
+ break;
+ KASSERT(whole_lb == 1);
+
+ part_num = le16toh(s_ad.loc.part_num);
+ lb_num = le32toh(s_ad.loc.lb_num);
+ len = le32toh(s_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ if (flags != UDF_EXT_REDIRECT) {
+ *cnt_inflen += len;
+ if (flags == UDF_EXT_ALLOCATED) {
+ *cnt_logblksrec += (len + lb_size -1) / lb_size;
+ }
+ } else {
+ KASSERT(len == lb_size);
+ }
+ /* check allocation */
+ if (flags == UDF_EXT_ALLOCATED)
+ udf_assert_allocated(udf_node->ump, part_num, lb_num,
+ (len + lb_size - 1) / lb_size);
+
+ /* check whole lb */
+ whole_lb = ((len % lb_size) == 0);
+
+ slot++;
+ }
+ /* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */
+
+ KASSERT(*cnt_inflen == inflen);
+ KASSERT(*cnt_logblksrec == logblksrec);
+
+ KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
+}
+#else
+static void
+udf_node_sanity_check(struct udf_node *udf_node,
+ uint64_t *cnt_inflen, uint64_t *cnt_logblksrec) {
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct icb_tag *icbtag;
+ uint64_t inflen, logblksrec;
+ int dscr_size, lb_size;
+
+ lb_size = le32toh(udf_node->ump->logical_vol->lb_size);
+
+ fe = udf_node->fe;
+ efe = udf_node->efe;
+ if (fe) {
+ icbtag = &fe->icbtag;
+ inflen = le64toh(fe->inf_len);
+ dscr_size = sizeof(struct file_entry) -1;
+ logblksrec = le64toh(fe->logblks_rec);
+ } else {
+ icbtag = &efe->icbtag;
+ inflen = le64toh(efe->inf_len);
+ dscr_size = sizeof(struct extfile_entry) -1;
+ logblksrec = le64toh(efe->logblks_rec);
+ }
+ *cnt_logblksrec = logblksrec;
+ *cnt_inflen = inflen;
+}
+#endif
+#endif
+/* --------------------------------------------------------------------- */
+
+void
+udf_calc_freespace(struct udf_mount *ump, uint64_t *sizeblks, uint64_t *freeblks)
+{
+ struct logvol_int_desc *lvid;
+ uint32_t *pos1, *pos2;
+ int vpart, num_vpart;
+
+ lvid = ump->logvol_integrity;
+ *freeblks = *sizeblks = 0;
+
+ /*
+ * Sequential media report free space directly (CD/DVD/BD-R); for the
+ * other media we need the logical volume integrity.
+ *
+ * We sum all free space up here regardless of type.
+ */
+
+ KASSERT(lvid, ("lvid is null"));
+ num_vpart = le32toh(lvid->num_part);
+
+#if 0
+ if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
+ /* use track info directly summing if there are 2 open */
+ /* XXX assumption at most two tracks open */
+ *freeblks = ump->data_track.free_blocks;
+ if (ump->data_track.tracknr != ump->metadata_track.tracknr)
+ *freeblks += ump->metadata_track.free_blocks;
+ *sizeblks = ump->discinfo.last_possible_lba;
+ } else {
+#endif
+ /* free and used space for mountpoint based on logvol integrity */
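+ /*
+ * The LVID tables area holds the free-space table (num_vpart entries)
+ * immediately followed by the size table, hence the num_vpart offset
+ * used for pos2; an entry of 0xffffffff means `no information' and is
+ * skipped.
+ */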
+ for (vpart = 0; vpart < num_vpart; vpart++) {
+ pos1 = &lvid->tables[0] + vpart;
+ pos2 = &lvid->tables[0] + num_vpart + vpart;
+ if (le32toh(*pos1) != (uint32_t) -1) {
+ *freeblks += le32toh(*pos1);
+ *sizeblks += le32toh(*pos2);
+ }
+ }
+#if 0
+ }
+#endif
+ /* adjust for accounted uncommitted blocks */
+ for (vpart = 0; vpart < num_vpart; vpart++)
+ *freeblks -= ump->uncommitted_lbs[vpart];
+
+ if (*freeblks > UDF_DISC_SLACK) {
+ *freeblks -= UDF_DISC_SLACK;
+ } else {
+ *freeblks = 0;
+ }
+}
+
+#if 0
+static void
+udf_calc_vpart_freespace(struct udf_mount *ump, uint16_t vpart_num, uint64_t *freeblks)
+{
+ struct logvol_int_desc *lvid;
+ uint32_t *pos1;
+
+ lvid = ump->logvol_integrity;
+ *freeblks = 0;
+
+ /*
+ * Sequential media report free space directly (CD/DVD/BD-R); for the
+ * other media we need the logical volume integrity.
+ *
+ * We sum all free space up here regardless of type.
+ */
+
+ KASSERT(lvid);
+ if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
+ /* XXX assumption at most two tracks open */
+ if (vpart_num == ump->data_part) {
+ *freeblks = ump->data_track.free_blocks;
+ } else {
+ *freeblks = ump->metadata_track.free_blocks;
+ }
+ } else {
+ /* free and used space for mountpoint based on logvol integrity */
+ pos1 = &lvid->tables[0] + vpart_num;
+ if (le32toh(*pos1) != (uint32_t) -1)
+ *freeblks += le32toh(*pos1);
+ }
+
+ /* adjust for accounted uncommitted blocks */
+ if (*freeblks > ump->uncommitted_lbs[vpart_num]) {
+ *freeblks -= ump->uncommitted_lbs[vpart_num];
+ } else {
+ *freeblks = 0;
+ }
+}
+#endif
+/* --------------------------------------------------------------------- */
+
+int
+udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
+ uint32_t *lb_numres, uint32_t *extres)
+{
+ struct part_desc *pdesc;
+ struct spare_map_entry *sme;
+ struct long_ad s_icb_loc;
+ uint64_t foffset, end_foffset;
+ int rel, part, error, eof, slot, flags;
+ uint32_t lb_size, len, lb_num, lb_rel, lb_packet;
+ uint32_t udf_rw32_lbmap, ext_offset;
+ uint16_t vpart;
+
+ KASSERT(ump && icb_loc && lb_numres,("ump && icb_loc && lb_numres"));
+
+ vpart = le16toh(icb_loc->loc.part_num);
+ lb_num = le32toh(icb_loc->loc.lb_num);
+ if (vpart > UDF_VTOP_RAWPART)
+ return (EINVAL);
+
+translate_again:
+ part = ump->vtop[vpart];
+ pdesc = ump->partitions[part];
+
+ switch (ump->vtop_tp[vpart]) {
+ case UDF_VTOP_TYPE_RAW :
+ /* 1:1 to the end of the device */
+ *lb_numres = lb_num;
+ *extres = INT_MAX;
+ return (0);
+ case UDF_VTOP_TYPE_PHYS :
+ /* transform into its disc logical block */
+ if (lb_num > le32toh(pdesc->part_len))
+ return (EINVAL);
+ *lb_numres = lb_num + le32toh(pdesc->start_loc);
+
+ /* extent from here to the end of the partition */
+ *extres = le32toh(pdesc->part_len) - lb_num;
+ return (0);
+ case UDF_VTOP_TYPE_VIRT :
+ /* only maps one logical block, lookup in VAT */
+ if (lb_num >= ump->vat_entries) /* XXX > or >= ? */
+ return (EINVAL);
+
+ /* lookup in virtual allocation table file */
+ /* mutex_enter(&ump->allocate_mutex); */
+ error = udf_vat_read(ump->vat_node,
+ (uint8_t *) &udf_rw32_lbmap, 4,
+ ump->vat_offset + lb_num * 4);
+ /* mutex_exit(&ump->allocate_mutex); */
+
+ if (error)
+ return (error);
+
+ lb_num = le32toh(udf_rw32_lbmap);
+
+ /* transform into its disc logical block */
+ if (lb_num > le32toh(pdesc->part_len))
+ return (EINVAL);
+ *lb_numres = lb_num + le32toh(pdesc->start_loc);
+
+ /* just one logical block */
+ *extres = 1;
+ return (0);
+ case UDF_VTOP_TYPE_SPARABLE :
+ /* check if the packet containing the lb_num is remapped */
+ lb_packet = lb_num / ump->sparable_packet_size;
+ lb_rel = lb_num % ump->sparable_packet_size;
+
+ for (rel = 0; rel < le16toh(ump->sparing_table->rt_l); rel++) {
+ sme = &ump->sparing_table->entries[rel];
+ if (lb_packet == le32toh(sme->org)) {
+ /* NOTE maps to absolute disc logical block! */
+ *lb_numres = le32toh(sme->map) + lb_rel;
+ *extres = ump->sparable_packet_size - lb_rel;
+ return (0);
+ }
+ }
+
+ /* transform into its disc logical block */
+ if (lb_num > le32toh(pdesc->part_len))
+ return (EINVAL);
+ *lb_numres = lb_num + le32toh(pdesc->start_loc);
+
+ /* rest of block */
+ *extres = ump->sparable_packet_size - lb_rel;
+ return (0);
+ case UDF_VTOP_TYPE_META :
+ /* printf("Metadata Partition Translated\n"); */
+ /* we have to look into the file's allocation descriptors */
+
+ /* use metadatafile allocation mutex */
+ lb_size = le32toh(ump->logical_vol->lb_size);
+
+ UDF_LOCK_NODE(ump->metadata_node, 0);
+
+ /* get first overlapping extent */
+ foffset = 0;
+ slot = 0;
+ for (;;) {
+ udf_get_adslot(ump->metadata_node,
+ slot, &s_icb_loc, &eof);
+ DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, "
+ "len = %d, lb_num = %d, part = %d\n",
+ slot, eof,
+ UDF_EXT_FLAGS(le32toh(s_icb_loc.len)),
+ UDF_EXT_LEN(le32toh(s_icb_loc.len)),
+ le32toh(s_icb_loc.loc.lb_num),
+ le16toh(s_icb_loc.loc.part_num)));
+ if (eof) {
+ DPRINTF(TRANSLATE,
+ ("Meta partition translation "
+ "failed: can't seek location\n"));
+ UDF_UNLOCK_NODE(ump->metadata_node, 0);
+ return (EINVAL);
+ }
+ len = le32toh(s_icb_loc.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ if (flags == UDF_EXT_REDIRECT) {
+ slot++;
+ continue;
+ }
+
+ end_foffset = foffset + len;
+
+ if (end_foffset > lb_num * lb_size)
+ break; /* found */
+ foffset = end_foffset;
+ slot++;
+ }
+ /* found overlapping slot */
+ ext_offset = lb_num * lb_size - foffset;
+
+ /* process extent offset */
+ lb_num = le32toh(s_icb_loc.loc.lb_num);
+ vpart = le16toh(s_icb_loc.loc.part_num);
+ lb_num += (ext_offset + lb_size -1) / lb_size;
+ ext_offset = 0;
+
+ UDF_UNLOCK_NODE(ump->metadata_node, 0);
+ if (flags != UDF_EXT_ALLOCATED) {
+ DPRINTF(TRANSLATE, ("Metadata partion translation "
+ "failed: not allocated\n"));
+ return (EINVAL);
+ }
+
+ /*
+ * vpart and lb_num are updated, translate again since we
+ * might be mapped on sparable media
+ */
+ goto translate_again;
+ default:
+ printf("UDF vtop translation scheme %d unimplemented yet\n",
+ ump->vtop_tp[vpart]);
+ }
+
+ return (EINVAL);
+}
+
+
+/* XXX provisional primitive braindead version */
+/* TODO use ext_res */
+#if 0
+void
+udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
+ uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping)
+{
+ struct long_ad loc;
+ uint32_t lb_numres, ext_res;
+ int sector;
+
+ for (sector = 0; sector < sectors; sector++) {
+ memset(&loc, 0, sizeof(struct long_ad));
+ loc.loc.part_num = le16toh(vpart_num);
+ loc.loc.lb_num = le32toh(*lmapping);
+ udf_translate_vtop(ump, &loc, &lb_numres, &ext_res);
+ *pmapping = lb_numres;
+ lmapping++; pmapping++;
+ }
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+/*
+ * This is a simplified version of the following function; it is used in
+ * bmap.
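+ *
+ * On success *lsector is either UDF_TRANS_INTERN (the file data is stored
+ * inside the node descriptor itself), UDF_TRANS_ZERO (a free or
+ * allocated-but-not-used extent) or the physical sector backing `block';
+ * *maxblks is the number of consecutive blocks covered by that answer.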
+ */
+int
+udf_bmap_translate(struct udf_node *udf_node, uint32_t block,
+ uint64_t *lsector, uint32_t *maxblks)
+{
+ struct udf_mount *ump;
+ struct icb_tag *icbtag;
+ struct long_ad t_ad, s_ad;
+ uint64_t foffset, new_foffset;
+ int eof, error, flags, slot, addr_type, icbflags;
+ uint32_t transsec32, lb_size, ext_offset, lb_num, len;
+ uint32_t ext_remain, translen;
+ uint16_t vpart_num;
+
+ if (!udf_node)
+ return (ENOENT);
+
+ UDF_LOCK_NODE(udf_node, 0);
+
+ /* initialise derivative vars */
+ ump = udf_node->ump;
+ lb_size = le32toh(ump->logical_vol->lb_size);
+
+ if (udf_node->fe) {
+ icbtag = &udf_node->fe->icbtag;
+ } else {
+ icbtag = &udf_node->efe->icbtag;
+ }
+ icbflags = le16toh(icbtag->flags);
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ /* do the work */
+ if (addr_type == UDF_ICB_INTERN_ALLOC) {
+ *lsector = UDF_TRANS_INTERN;
+ *maxblks = 1;
+ UDF_UNLOCK_NODE(udf_node, 0);
+ return (0);
+ }
+
+ /* find first overlapping extent */
+ foffset = 0;
+ slot = 0;
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &s_ad, &eof);
+ DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
+ "lb_num = %d, part = %d\n", slot, eof,
+ UDF_EXT_FLAGS(le32toh(s_ad.len)),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ le32toh(s_ad.loc.lb_num),
+ le16toh(s_ad.loc.part_num)));
+ if (eof) {
+ DPRINTF(TRANSLATE,
+ ("Translate file extent "
+ "failed: can't seek location\n"));
+ UDF_UNLOCK_NODE(udf_node, 0);
+ return (EINVAL);
+ }
+ len = le32toh(s_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ if (flags == UDF_EXT_REDIRECT) {
+ slot++;
+ continue;
+ }
+
+ new_foffset = foffset + len;
+
+ if (new_foffset > block * lb_size)
+ break; /* found */
+ foffset = new_foffset;
+ slot++;
+ }
+ /* found overlapping slot */
+
+ lb_num = le32toh(s_ad.loc.lb_num);
+ vpart_num = le16toh(s_ad.loc.part_num);
+
+ ext_offset = block * lb_size - foffset;
+
+ /* process extent, don't forget to advance on ext_offset! */
+ lb_num += (ext_offset + lb_size -1) / lb_size;
+ ext_remain = (len - ext_offset + lb_size -1) / lb_size;
+
+ /*
+ * Note that the extent udf_translate_vtop() returns doesn't have to
+ * span the whole extent found above, so for allocated extents
+ * *maxblks is clamped to the smaller of the two lengths.
+ */
+ switch (flags) {
+ case UDF_EXT_FREE :
+ case UDF_EXT_ALLOCATED_BUT_NOT_USED :
+ *lsector = UDF_TRANS_ZERO;
+ *maxblks = ext_remain;
+ break;
+ case UDF_EXT_ALLOCATED :
+ t_ad.loc.lb_num = htole32(lb_num);
+ t_ad.loc.part_num = htole16(vpart_num);
+ error = udf_translate_vtop(ump,
+ &t_ad, &transsec32, &translen);
+ if (error) {
+ UDF_UNLOCK_NODE(udf_node, 0);
+ return (error);
+ }
+ *lsector = transsec32;
+ *maxblks = MIN(ext_remain, translen);
+ break;
+ default:
+ DPRINTF(TRANSLATE, ("Translate file extend "
+ "failed: bad flags %x\n", flags));
+ UDF_UNLOCK_NODE(udf_node, 0);
+ return (EINVAL);
+ }
+
+ UDF_UNLOCK_NODE(udf_node, 0);
+
+ return (0);
+}
+/* --------------------------------------------------------------------- */
+
+/*
+ * Translate an extent (in logical_blocks) into logical block numbers; used
+ * for read and write operations. DOESN'T check extents.
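+ *
+ * The 32-bit `len' field of each allocation descriptor packs the extent
+ * byte length (UDF_EXT_LEN) in its low 30 bits and the extent type
+ * (UDF_EXT_FLAGS) in its top two bits. For an intern allocation only
+ * map[0] is set (to UDF_TRANS_INTERN); otherwise every entry of map[],
+ * one per logical block starting at `from', receives UDF_TRANS_ZERO or a
+ * physical sector number.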
+ */
+int
+udf_translate_file_extent(struct udf_node *udf_node,
+ uint32_t from, uint32_t num_lb,
+ uint64_t *map)
+{
+ struct udf_mount *ump;
+ struct icb_tag *icbtag;
+ struct long_ad t_ad, s_ad;
+ uint64_t transsec;
+ uint64_t foffset, end_foffset;
+ uint32_t transsec32;
+ uint32_t lb_size;
+ uint32_t ext_offset;
+ uint32_t lb_num, len;
+ uint32_t overlap, translen;
+ uint16_t vpart_num;
+ int eof, error, flags;
+ int slot, addr_type, icbflags;
+
+ if (!udf_node)
+ return (ENOENT);
+
+ KASSERT(num_lb > 0, "num_lb > 0");
+
+ UDF_LOCK_NODE(udf_node, 0);
+
+ /* initialise derivative vars */
+ ump = udf_node->ump;
+ lb_size = le32toh(ump->logical_vol->lb_size);
+
+ if (udf_node->fe) {
+ icbtag = &udf_node->fe->icbtag;
+ } else {
+ icbtag = &udf_node->efe->icbtag;
+ }
+ icbflags = le16toh(icbtag->flags);
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ /* do the work */
+ if (addr_type == UDF_ICB_INTERN_ALLOC) {
+ *map = UDF_TRANS_INTERN;
+ UDF_UNLOCK_NODE(udf_node, 0);
+ return (0);
+ }
+
+ /* find first overlapping extent */
+ foffset = 0;
+ slot = 0;
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &s_ad, &eof);
+ DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
+ "lb_num = %d, part = %d\n", slot, eof,
+ UDF_EXT_FLAGS(le32toh(s_ad.len)),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ le32toh(s_ad.loc.lb_num),
+ le16toh(s_ad.loc.part_num)));
+ if (eof) {
+ DPRINTF(TRANSLATE,
+ ("Translate file extent "
+ "failed: can't seek location\n"));
+ UDF_UNLOCK_NODE(udf_node, 0);
+ return (EINVAL);
+ }
+ len = le32toh(s_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+ lb_num = le32toh(s_ad.loc.lb_num);
+
+ if (flags == UDF_EXT_REDIRECT) {
+ slot++;
+ continue;
+ }
+
+ end_foffset = foffset + len;
+
+ if (end_foffset > from * lb_size)
+ break; /* found */
+ foffset = end_foffset;
+ slot++;
+ }
+ /* found overlapping slot */
+ ext_offset = from * lb_size - foffset;
+
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &s_ad, &eof);
+ DPRINTF(ADWLK, ("slot %d, eof = %d, flags = %d, len = %d, "
+ "lb_num = %d, part = %d\n", slot, eof,
+ UDF_EXT_FLAGS(le32toh(s_ad.len)),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ le32toh(s_ad.loc.lb_num),
+ le16toh(s_ad.loc.part_num)));
+ if (eof) {
+ DPRINTF(TRANSLATE,
+ ("Translate file extent "
+ "failed: past eof\n"));
+ UDF_UNLOCK_NODE(udf_node, 0);
+ return (EINVAL);
+ }
+
+ len = le32toh(s_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ lb_num = le32toh(s_ad.loc.lb_num);
+ vpart_num = le16toh(s_ad.loc.part_num);
+
+ end_foffset = foffset + len;
+
+ /* process extent, don't forget to advance on ext_offset! */
+ lb_num += (ext_offset + lb_size -1) / lb_size;
+ overlap = (len - ext_offset + lb_size -1) / lb_size;
+ ext_offset = 0;
+
+ /*
+ * Note that the while(){} is necessary because the extent that
+ * udf_translate_vtop() returns doesn't have to span the whole
+ * extent.
+ */
+
+ overlap = MIN(overlap, num_lb);
+ while (overlap && (flags != UDF_EXT_REDIRECT)) {
+ switch (flags) {
+ case UDF_EXT_FREE :
+ case UDF_EXT_ALLOCATED_BUT_NOT_USED :
+ transsec = UDF_TRANS_ZERO;
+ translen = overlap;
+ while (overlap && num_lb && translen) {
+ *map++ = transsec;
+ lb_num++;
+ overlap--; num_lb--; translen--;
+ }
+ break;
+ case UDF_EXT_ALLOCATED :
+ t_ad.loc.lb_num = htole32(lb_num);
+ t_ad.loc.part_num = htole16(vpart_num);
+ error = udf_translate_vtop(ump,
+ &t_ad, &transsec32, &translen);
+ transsec = transsec32;
+ if (error) {
+ UDF_UNLOCK_NODE(udf_node, 0);
+ return (error);
+ }
+ while (overlap && num_lb && translen) {
+ *map++ = transsec;
+ lb_num++; transsec++;
+ overlap--; num_lb--; translen--;
+ }
+ break;
+ default:
+ DPRINTF(TRANSLATE,
+ ("Translate file extent "
+ "failed: bad flags %x\n", flags));
+ UDF_UNLOCK_NODE(udf_node, 0);
+ return (EINVAL);
+ }
+ }
+ if (num_lb == 0)
+ break;
+
+ if (flags != UDF_EXT_REDIRECT)
+ foffset = end_foffset;
+ slot++;
+ }
+ UDF_UNLOCK_NODE(udf_node, 0);
+
+ return (0);
+}
+
+#if 0
+/* --------------------------------------------------------------------- */
+
+static int
+udf_search_free_vatloc(struct udf_mount *ump, uint32_t *lbnumres)
+{
+ uint32_t lb_size, lb_num, lb_map, le32toh_lbmap;
+ uint8_t *blob;
+ int entry, chunk, found, error;
+
+ KASSERT(ump, ("ump is NULL"));
+ KASSERT(ump->logical_vol, ("ump->logical_vol is NULL"));
+
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ blob = malloc(lb_size, M_UDFTEMP, M_WAITOK);
+
+ /* TODO static allocation of search chunk */
+
+ lb_num = MIN(ump->vat_entries, ump->vat_last_free_lb);
+ found = 0;
+ error = 0;
+ entry = 0;
+ do {
+ chunk = MIN(lb_size, (ump->vat_entries - lb_num) * 4);
+ if (chunk <= 0)
+ break;
+ /* load in chunk */
+ error = udf_vat_read(ump->vat_node, blob, chunk,
+ ump->vat_offset + lb_num * 4);
+
+ if (error)
+ break;
+
+ /* search this chunk */
+ for (entry=0; entry < chunk /4; entry++, lb_num++) {
+ le32toh_lbmap = *((uint32_t *) (blob + entry * 4));
+ lb_map = le32toh(le32toh_lbmap);
+ if (lb_map == 0xffffffff) {
+ found = 1;
+ break;
+ }
+ }
+ } while (!found);
+ if (error) {
+ printf("udf_search_free_vatloc: error reading in vat chunk "
+ "(lb %d, size %d)\n", lb_num, chunk);
+ }
+
+ if (!found) {
+ /* extend VAT */
+ DPRINTF(WRITE, ("udf_search_free_vatloc: extending\n"));
+ lb_num = ump->vat_entries;
+ ump->vat_entries++;
+ }
+
+ /* mark entry with initialiser just in case */
+ lb_map = le32toh(0xfffffffe);
+ udf_vat_write(ump->vat_node, (uint8_t *) &lb_map, 4,
+ ump->vat_offset + lb_num *4);
+ ump->vat_last_free_lb = lb_num;
+
+ free(blob, M_UDFTEMP);
+ *lbnumres = lb_num;
+ return (0);
+}
+
+
+static void
+udf_bitmap_allocate(struct udf_bitmap *bitmap, int ismetadata,
+ uint32_t *num_lb, uint64_t *lmappos)
+{
+ uint32_t offset, lb_num, bit;
+ int32_t diff;
+ uint8_t *bpos;
+ int pass;
+
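+ /*
+ * In these space bitmaps a set bit marks a free block: allocation
+ * clears the bit below, udf_bitmap_free() sets it again.
+ */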
+ if (!ismetadata) {
+ /* heuristic to keep the two pointers not too close */
+ diff = bitmap->data_pos - bitmap->metadata_pos;
+ if ((diff >= 0) && (diff < 1024))
+ bitmap->data_pos = bitmap->metadata_pos + 1024;
+ }
+ offset = ismetadata ? bitmap->metadata_pos : bitmap->data_pos;
+ offset &= ~7;
+ for (pass = 0; pass < 2; pass++) {
+ if (offset >= bitmap->max_offset)
+ offset = 0;
+
+ while (offset < bitmap->max_offset) {
+ if (*num_lb == 0)
+ break;
+
+ /* use first bit not set */
+ bpos = bitmap->bits + offset/8;
+ bit = ffs(*bpos); /* returns 0 or 1..8 */
+ if (bit == 0) {
+ offset += 8;
+ continue;
+ }
+
+ /* check for ffs overshoot */
+ if (offset + bit-1 >= bitmap->max_offset) {
+ offset = bitmap->max_offset;
+ break;
+ }
+
+ DPRINTF(PARANOIA, ("XXX : allocate %d, %p, bit %d\n",
+ offset + bit -1, bpos, bit-1));
+ *bpos &= ~(1 << (bit-1));
+ lb_num = offset + bit-1;
+ *lmappos++ = lb_num;
+ *num_lb = *num_lb - 1;
+ /* offset = (offset & ~7); */
+ }
+ }
+
+ if (ismetadata) {
+ bitmap->metadata_pos = offset;
+ } else {
+ bitmap->data_pos = offset;
+ }
+}
+
+
+static void
+udf_bitmap_free(struct udf_bitmap *bitmap, uint32_t lb_num, uint32_t num_lb)
+{
+ uint32_t offset;
+ uint32_t bit, bitval;
+ uint8_t *bpos;
+
+ offset = lb_num;
+
+ /* starter bits */
+ bpos = bitmap->bits + offset/8;
+ bit = offset % 8;
+ while ((bit != 0) && (num_lb > 0)) {
+ bitval = (1 << bit);
+ KASSERT((*bpos & bitval) == 0);
+ DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
+ offset, bpos, bit));
+ *bpos |= bitval;
+ offset++; num_lb--;
+ bit = (bit + 1) % 8;
+ }
+ if (num_lb == 0)
+ return;
+
+ /* whole bytes */
+ KASSERT(bit == 0);
+ bpos = bitmap->bits + offset / 8;
+ while (num_lb >= 8) {
+ KASSERT((*bpos == 0));
+ DPRINTF(PARANOIA, ("XXX : free %d + 8, %p\n", offset, bpos));
+ *bpos = 255;
+ offset += 8; num_lb -= 8;
+ bpos++;
+ }
+
+ /* stop bits */
+ KASSERT(num_lb < 8);
+ bit = 0;
+ while (num_lb > 0) {
+ bitval = (1 << bit);
+ KASSERT((*bpos & bitval) == 0);
+ DPRINTF(PARANOIA, ("XXX : free %d, %p, %d\n",
+ offset, bpos, bit));
+ *bpos |= bitval;
+ offset++; num_lb--;
+ bit = (bit + 1) % 8;
+ }
+}
+
+
+static uint32_t
+udf_bitmap_check_trunc_free(struct udf_bitmap *bitmap, uint32_t to_trunc)
+{
+ uint32_t seq_free, offset;
+ uint8_t *bpos;
+ uint8_t bit, bitval;
+
+ DPRINTF(RESERVE, ("\ttrying to trunc %d bits from bitmap\n", to_trunc));
+ offset = bitmap->max_offset - to_trunc;
+
+ /* starter bits (if any) */
+ bpos = bitmap->bits + offset/8;
+ bit = offset % 8;
+ seq_free = 0;
+ while (to_trunc > 0) {
+ seq_free++;
+ bitval = (1 << bit);
+ if (!(*bpos & bitval))
+ seq_free = 0;
+ offset++; to_trunc--;
+ bit++;
+ if (bit == 8) {
+ bpos++;
+ bit = 0;
+ }
+ }
+
+ DPRINTF(RESERVE, ("\tfound %d sequential free bits in bitmap\n", seq_free));
+ return (seq_free);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * We check for overall disc space with a margin to prevent critical
+ * conditions. If disc space is low we try to force a sync() to improve our
+ * estimates. When confronted with a meta-data partition size shortage, we
+ * check whether the partition can be extended and extend it when needed.
+ *
+ * A 2nd strategy we could use when disc space is getting low on a disc
+ * formatted with a meta-data partition is to see if there are sparse areas in
+ * the meta-data partition and free blocks there for extra data.
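+ *
+ * The reservation bookkeeping itself is simple: udf_do_reserve_space()
+ * bumps ump->uncommitted_lbs[vpart] (and the node's own counter) and
+ * udf_do_unreserve_space() undoes that; udf_calc_freespace() subtracts
+ * these counters, so reserved-but-unwritten blocks already count as used.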
+ */
+
+void
+udf_do_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
+ uint16_t vpart_num, uint32_t num_lb)
+{
+ ump->uncommitted_lbs[vpart_num] += num_lb;
+ if (udf_node)
+ udf_node->uncommitted_lbs += num_lb;
+}
+
+
+void
+udf_do_unreserve_space(struct udf_mount *ump, struct udf_node *udf_node,
+ uint16_t vpart_num, uint32_t num_lb)
+{
+ ump->uncommitted_lbs[vpart_num] -= num_lb;
+ if (ump->uncommitted_lbs[vpart_num] < 0) {
+ DPRINTF(RESERVE, ("UDF: underflow on partition reservation, "
+ "part %d: %d\n", vpart_num,
+ ump->uncommitted_lbs[vpart_num]));
+ ump->uncommitted_lbs[vpart_num] = 0;
+ }
+ if (udf_node) {
+ udf_node->uncommitted_lbs -= num_lb;
+ if (udf_node->uncommitted_lbs < 0) {
+ DPRINTF(RESERVE, ("UDF: underflow of node "
+ "reservation : %d\n",
+ udf_node->uncommitted_lbs));
+ udf_node->uncommitted_lbs = 0;
+ }
+ }
+}
+
+
+int
+udf_reserve_space(struct udf_mount *ump, struct udf_node *udf_node,
+ int udf_c_type, uint16_t vpart_num, uint32_t num_lb, int can_fail)
+{
+ uint64_t freeblks;
+ uint64_t slack;
+ int i, error;
+
+ slack = 0;
+ if (can_fail)
+ slack = UDF_DISC_SLACK;
+
+ error = 0;
+ mutex_enter(&ump->allocate_mutex);
+
+ /* check if there is enough space available */
+ for (i = 0; i < 3; i++) { /* XXX arbitrary number */
+ udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
+ if (num_lb + slack < freeblks)
+ break;
+ /* issue SYNC */
+ DPRINTF(RESERVE, ("udf_reserve_space: issuing sync\n"));
+ mutex_exit(&ump->allocate_mutex);
+ udf_do_sync(ump, FSCRED, 0);
+ mutex_enter(&mntvnode_lock);
+ /* 1/8 second wait */
+ cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
+ hz/8);
+ mutex_exit(&mntvnode_lock);
+ mutex_enter(&ump->allocate_mutex);
+ }
+
+ /* check if there is enough space available now */
+ udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
+ if (num_lb + slack >= freeblks) {
+ DPRINTF(RESERVE, ("udf_reserve_space: try to redistribute "
+ "partition space\n"));
+ DPRINTF(RESERVE, ("\tvpart %d, type %d is full\n",
+ vpart_num, ump->vtop_alloc[vpart_num]));
+ /* Try to redistribute space if possible */
+ udf_collect_free_space_for_vpart(ump, vpart_num, num_lb + slack);
+ }
+
+ /* check if there is enough space available now */
+ udf_calc_vpart_freespace(ump, vpart_num, &freeblks);
+ if (num_lb + slack <= freeblks) {
+ udf_do_reserve_space(ump, udf_node, vpart_num, num_lb);
+ } else {
+ DPRINTF(RESERVE, ("udf_reserve_space: out of disc space\n"));
+ error = ENOSPC;
+ }
+
+ mutex_exit(&ump->allocate_mutex);
+ return (error);
+}
+
+
+void
+udf_cleanup_reservation(struct udf_node *udf_node)
+{
+ struct udf_mount *ump = udf_node->ump;
+ int vpart_num;
+
+ mutex_enter(&ump->allocate_mutex);
+
+ /* compensate for overlapping blocks */
+ DPRINTF(RESERVE, ("UDF: overlapped %d blocks in count\n", udf_node->uncommitted_lbs));
+
+ vpart_num = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
+ udf_do_unreserve_space(ump, udf_node, vpart_num, udf_node->uncommitted_lbs);
+
+ DPRINTF(RESERVE, ("\ttotal now %d\n", ump->uncommitted_lbs[vpart_num]));
+
+ /* sanity */
+ if (ump->uncommitted_lbs[vpart_num] < 0)
+ ump->uncommitted_lbs[vpart_num] = 0;
+
+ mutex_exit(&ump->allocate_mutex);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Allocate an extent of given length on given virt. partition. It doesn't
+ * have to be one stretch.
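+ *
+ * The allocator used is picked by ump->vtop_alloc[vpart_num]: a VAT slot
+ * search on virtual partitions, strictly sequential allocation on
+ * recordable media, or one of the space bitmaps on rewritable and
+ * metadata partitions.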
+ */
+
+int
+udf_allocate_space(struct udf_mount *ump, struct udf_node *udf_node,
+ int udf_c_type, uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping)
+{
+ struct mmc_trackinfo *alloc_track, *other_track;
+ struct udf_bitmap *bitmap;
+ struct part_desc *pdesc;
+ struct logvol_int_desc *lvid;
+ uint64_t *lmappos;
+ uint32_t ptov, lb_num, *freepos, free_lbs;
+ int lb_size, alloc_num_lb;
+ int alloc_type, error;
+ int is_node;
+
+ DPRINTF(CALL, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
+ udf_c_type, vpart_num, num_lb));
+ mutex_enter(&ump->allocate_mutex);
+
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ KASSERT(lb_size == ump->discinfo.sector_size);
+
+ alloc_type = ump->vtop_alloc[vpart_num];
+ is_node = (udf_c_type == UDF_C_NODE);
+
+ lmappos = lmapping;
+ error = 0;
+ switch (alloc_type) {
+ case UDF_ALLOC_VAT :
+ /* search empty slot in VAT file */
+ KASSERT(num_lb == 1);
+ error = udf_search_free_vatloc(ump, &lb_num);
+ if (!error) {
+ *lmappos = lb_num;
+
+ /* reserve on the backing sequential partition since
+ * that partition is credited back later */
+ udf_do_reserve_space(ump, udf_node,
+ ump->vtop[vpart_num], num_lb);
+ }
+ break;
+ case UDF_ALLOC_SEQUENTIAL :
+ /* sequential allocation on recordable media */
+ /* get partition backing up this vpart_num */
+ pdesc = ump->partitions[ump->vtop[vpart_num]];
+
+ /* calculate offset from physical base partition */
+ ptov = le32toh(pdesc->start_loc);
+
+ /* get our track descriptors */
+ if (vpart_num == ump->node_part) {
+ alloc_track = &ump->metadata_track;
+ other_track = &ump->data_track;
+ } else {
+ alloc_track = &ump->data_track;
+ other_track = &ump->metadata_track;
+ }
+
+ /* allocate */
+ for (lb_num = 0; lb_num < num_lb; lb_num++) {
+ *lmappos++ = alloc_track->next_writable - ptov;
+ alloc_track->next_writable++;
+ alloc_track->free_blocks--;
+ }
+
+ /* keep other track up-to-date */
+ if (alloc_track->tracknr == other_track->tracknr)
+ memcpy(other_track, alloc_track,
+ sizeof(struct mmc_trackinfo));
+ break;
+ case UDF_ALLOC_SPACEMAP :
+ /* try to allocate on unallocated bits */
+ alloc_num_lb = num_lb;
+ bitmap = &ump->part_unalloc_bits[vpart_num];
+ udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
+ ump->lvclose |= UDF_WRITE_PART_BITMAPS;
+
+ /* have we allocated all? */
+ if (alloc_num_lb) {
+ /* TODO convert freed to unalloc and try again */
+ /* free allocated piece for now */
+ lmappos = lmapping;
+ for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
+ udf_bitmap_free(bitmap, *lmappos++, 1);
+ }
+ error = ENOSPC;
+ }
+ if (!error) {
+ /* adjust freecount */
+ lvid = ump->logvol_integrity;
+ freepos = &lvid->tables[0] + vpart_num;
+ free_lbs = le32toh(*freepos);
+ *freepos = le32toh(free_lbs - num_lb);
+ }
+ break;
+ case UDF_ALLOC_METABITMAP : /* UDF 2.50, 2.60 BluRay-RE */
+ /* allocate on metadata unallocated bits */
+ alloc_num_lb = num_lb;
+ bitmap = &ump->metadata_unalloc_bits;
+ udf_bitmap_allocate(bitmap, is_node, &alloc_num_lb, lmappos);
+ ump->lvclose |= UDF_WRITE_PART_BITMAPS;
+
+ /* have we allocated all? */
+ if (alloc_num_lb) {
+ /* YIKES! TODO we need to extend the metadata partition */
+ /* free allocated piece for now */
+ lmappos = lmapping;
+ for (lb_num=0; lb_num < num_lb-alloc_num_lb; lb_num++) {
+ udf_bitmap_free(bitmap, *lmappos++, 1);
+ }
+ error = ENOSPC;
+ }
+ if (!error) {
+ /* adjust freecount */
+ lvid = ump->logvol_integrity;
+ freepos = &lvid->tables[0] + vpart_num;
+ free_lbs = le32toh(*freepos);
+ *freepos = le32toh(free_lbs - num_lb);
+ }
+ break;
+ case UDF_ALLOC_METASEQUENTIAL : /* UDF 2.60 BluRay-R */
+ case UDF_ALLOC_RELAXEDSEQUENTIAL : /* UDF 2.50/~meta BluRay-R */
+ printf("ALERT: udf_allocate_space : allocation %d "
+ "not implemented yet!\n", alloc_type);
+ /* TODO implement, doesn't have to be contiguous */
+ error = ENOSPC;
+ break;
+ }
+
+ if (!error) {
+ /* credit our partition since we have committed the space */
+ udf_do_unreserve_space(ump, udf_node, vpart_num, num_lb);
+ }
+
+#ifdef DEBUG
+ if (udf_verbose & UDF_DEBUG_ALLOC) {
+ lmappos = lmapping;
+ printf("udf_allocate_space, allocated logical lba :\n");
+ for (lb_num = 0; lb_num < num_lb; lb_num++) {
+ printf("%s %"PRIu64, (lb_num > 0)?",":"",
+ *lmappos++);
+ }
+ printf("\n");
+ }
+#endif
+ mutex_exit(&ump->allocate_mutex);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+void
+udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num,
+ uint16_t vpart_num, uint32_t num_lb)
+{
+ struct udf_bitmap *bitmap;
+ struct part_desc *pdesc;
+ struct logvol_int_desc *lvid;
+ uint32_t ptov, lb_map, udf_rw32_lbmap;
+ uint32_t *freepos, free_lbs;
+ int phys_part;
+ int error;
+
+ DPRINTF(ALLOC, ("udf_free_allocated_space: freeing virt lbnum %d "
+ "part %d + %d sect\n", lb_num, vpart_num, num_lb));
+
+ /* no use freeing zero length */
+ if (num_lb == 0)
+ return;
+
+ mutex_enter(&ump->allocate_mutex);
+
+ /* get partition backing up this vpart_num */
+ pdesc = ump->partitions[ump->vtop[vpart_num]];
+
+ switch (ump->vtop_tp[vpart_num]) {
+ case UDF_VTOP_TYPE_PHYS :
+ case UDF_VTOP_TYPE_SPARABLE :
+ /* free space to freed or unallocated space bitmap */
+ ptov = le32toh(pdesc->start_loc);
+ phys_part = ump->vtop[vpart_num];
+
+ /* first try freed space bitmap */
+ bitmap = &ump->part_freed_bits[phys_part];
+
+ /* if not defined, use unallocated bitmap */
+ if (bitmap->bits == NULL)
+ bitmap = &ump->part_unalloc_bits[phys_part];
+
+ /* if no bitmaps are defined, bail out; XXX OK? */
+ if (bitmap->bits == NULL)
+ break;
+
+ /* free bits if it's defined */
+ KASSERT(bitmap->bits);
+ ump->lvclose |= UDF_WRITE_PART_BITMAPS;
+ udf_bitmap_free(bitmap, lb_num, num_lb);
+
+ /* adjust freecount */
+ lvid = ump->logvol_integrity;
+ freepos = &lvid->tables[0] + vpart_num;
+ free_lbs = le32toh(*freepos);
+ *freepos = le32toh(free_lbs + num_lb);
+ break;
+ case UDF_VTOP_TYPE_VIRT :
+ /* free this VAT entry */
+ KASSERT(num_lb == 1);
+
+ lb_map = 0xffffffff;
+ udf_rw32_lbmap = le32toh(lb_map);
+ error = udf_vat_write(ump->vat_node,
+ (uint8_t *) &udf_rw32_lbmap, 4,
+ ump->vat_offset + lb_num * 4);
+ KASSERT(error == 0);
+ ump->vat_last_free_lb = MIN(ump->vat_last_free_lb, lb_num);
+ break;
+ case UDF_VTOP_TYPE_META :
+ /* free space in the metadata bitmap */
+ bitmap = &ump->metadata_unalloc_bits;
+ KASSERT(bitmap->bits);
+
+ ump->lvclose |= UDF_WRITE_PART_BITMAPS;
+ udf_bitmap_free(bitmap, lb_num, num_lb);
+
+ /* adjust freecount */
+ lvid = ump->logvol_integrity;
+ freepos = &lvid->tables[0] + vpart_num;
+ free_lbs = le32toh(*freepos);
+ *freepos = le32toh(free_lbs + num_lb);
+ break;
+ default:
+ printf("ALERT: udf_free_allocated_space : allocation %d "
+ "not implemented yet!\n", ump->vtop_tp[vpart_num]);
+ break;
+ }
+
+ mutex_exit(&ump->allocate_mutex);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Special function to synchronise the metadatamirror file when the metadata
+ * file changes on resizing. When the metadata file is actually duplicated,
+ * this action is a no-op since the two describe different extents on the disc.
+ */
+
+void udf_synchronise_metadatamirror_node(struct udf_mount *ump)
+{
+ struct udf_node *meta_node, *metamirror_node;
+ struct long_ad s_ad;
+ int slot, cpy_slot;
+ int error, eof;
+
+ if (ump->metadata_flags & METADATA_DUPLICATED)
+ return;
+
+ meta_node = ump->metadata_node;
+ metamirror_node = ump->metadatamirror_node;
+
+ /* 1) wipe mirror node */
+ udf_wipe_adslots(metamirror_node);
+
+ /* 2) copy all node descriptors from the meta_node */
+ slot = 0;
+ cpy_slot = 0;
+ for (;;) {
+ udf_get_adslot(meta_node, slot, &s_ad, &eof);
+ if (eof)
+ break;
+ error = udf_append_adslot(metamirror_node, &cpy_slot, &s_ad);
+ if (error) {
+ /* WTF, this shouldn't happen, what to do now? */
+ panic("udf_synchronise_metadatamirror_node failed!");
+ }
+ slot++;
+ }
+
+ /* 3) adjust metamirror_node size */
+ if (meta_node->fe) {
+ KASSERT(metamirror_node->fe);
+ metamirror_node->fe->inf_len = meta_node->fe->inf_len;
+ } else {
+ KASSERT(meta_node->efe);
+ KASSERT(metamirror_node->efe);
+ metamirror_node->efe->inf_len = meta_node->efe->inf_len;
+ metamirror_node->efe->obj_size = meta_node->efe->obj_size;
+ }
+
+ /* for sanity */
+ udf_count_alloc_exts(metamirror_node);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * When faced with an out of space but there is still space available on other
+ * partitions, try to redistribute the space. This is only defined for media
+ * using Metadata partitions.
+ *
+ * There are two formats to deal with. Either it's a `normal' metadata
+ * partition and we can move blocks between a metadata bitmap and its
+ * companion data spacemap, OR it's a UDF 2.60 formatted BluRay-R disc with POW
+ * and a metadata partition.
+ */
+
+static uint32_t
+udf_trunc_metadatapart(struct udf_mount *ump, uint32_t num_lb)
+{
+ struct udf_node *bitmap_node;
+ struct udf_bitmap *bitmap;
+ struct space_bitmap_desc *sbd, *new_sbd;
+ struct logvol_int_desc *lvid;
+ uint64_t inf_len;
+ uint64_t meta_free_lbs, data_free_lbs;
+ uint32_t *freepos, *sizepos;
+ uint32_t unit, lb_size, to_trunc;
+ uint16_t meta_vpart_num, data_vpart_num, num_vpart;
+ int err;
+
+ unit = ump->metadata_alloc_unit_size;
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ lvid = ump->logvol_integrity;
+
+ /* lookup vpart for metadata partition */
+ meta_vpart_num = ump->node_part;
+ KASSERT(ump->vtop_alloc[meta_vpart_num] == UDF_ALLOC_METABITMAP);
+
+ /* lookup vpart for data partition */
+ data_vpart_num = ump->data_part;
+ KASSERT(ump->vtop_alloc[data_vpart_num] == UDF_ALLOC_SPACEMAP);
+
+ udf_calc_vpart_freespace(ump, data_vpart_num, &data_free_lbs);
+ udf_calc_vpart_freespace(ump, meta_vpart_num, &meta_free_lbs);
+
+ DPRINTF(RESERVE, ("\tfree space on data partition %"PRIu64" blks\n", data_free_lbs));
+ DPRINTF(RESERVE, ("\tfree space on metadata partition %"PRIu64" blks\n", meta_free_lbs));
+
+ /* give away some of the free meta space, in unit block sizes */
+ to_trunc = meta_free_lbs/4; /* give out a quarter */
+ to_trunc = MAX(to_trunc, num_lb);
+ to_trunc = unit * ((to_trunc + unit-1) / unit); /* round up */
+
+ /* scale down if needed and bail out when out of space */
+ if (to_trunc >= meta_free_lbs)
+ return (num_lb);
+
+ /* check extent of bits marked free at the end of the map */
+ bitmap = &ump->metadata_unalloc_bits;
+ to_trunc = udf_bitmap_check_trunc_free(bitmap, to_trunc);
+ to_trunc = unit * (to_trunc / unit); /* round down again */
+ if (to_trunc == 0)
+ return (num_lb);
+
+ DPRINTF(RESERVE, ("\ttruncating %d lbs from the metadata bitmap\n",
+ to_trunc));
+
+ /* get length of the metadata bitmap node file */
+ bitmap_node = ump->metadatabitmap_node;
+ if (bitmap_node->fe) {
+ inf_len = le64toh(bitmap_node->fe->inf_len);
+ } else {
+ KASSERT(bitmap_node->efe);
+ inf_len = le64toh(bitmap_node->efe->inf_len);
+ }
+ inf_len -= to_trunc/8;
+
+ /* as per [UDF 2.60/2.2.13.6] : */
+ /* 1) update the SBD in the metadata bitmap file */
+ sbd = (struct space_bitmap_desc *) bitmap->blob;
+ sbd->num_bits = le32toh(sbd->num_bits) - to_trunc;
+ sbd->num_bytes = le32toh(sbd->num_bytes) - to_trunc/8;
+ bitmap->max_offset = le32toh(sbd->num_bits);
+
+ num_vpart = le32toh(lvid->num_part);
+ freepos = &lvid->tables[0] + meta_vpart_num;
+ sizepos = &lvid->tables[0] + num_vpart + meta_vpart_num;
+ *freepos = le32toh(*freepos) - to_trunc;
+ *sizepos = le32toh(*sizepos) - to_trunc;
+
+ /* realloc bitmap for better memory usage */
+ new_sbd = realloc(sbd, inf_len, M_UDFTEMP,
+ M_CANFAIL | M_WAITOK);
+ if (new_sbd) {
+ /* update pointers */
+ ump->metadata_unalloc_dscr = new_sbd;
+ bitmap->blob = (uint8_t *) new_sbd;
+ }
+ ump->lvclose |= UDF_WRITE_PART_BITMAPS;
+
+ /*
+ * The truncated space is secured now and can't be allocated anymore. Release
+ * the allocate mutex so we can shrink the nodes the normal way.
+ */
+ mutex_exit(&ump->allocate_mutex);
+
+ /* 2) trunc the metadata bitmap information file, freeing blocks */
+ err = udf_shrink_node(bitmap_node, inf_len);
+ KASSERT(err == 0);
+
+ /* 3) trunc the metadata file and mirror file, freeing blocks */
+ inf_len = le32toh(sbd->num_bits) * lb_size; /* [4/14.12.4] */
+ err = udf_shrink_node(ump->metadata_node, inf_len);
+ KASSERT(err == 0);
+ if (ump->metadatamirror_node && (ump->metadata_flags & METADATA_DUPLICATED)) {
+ err = udf_shrink_node(ump->metadatamirror_node, inf_len);
+ KASSERT(err == 0);
+ }
+ ump->lvclose |= UDF_WRITE_METAPART_NODES;
+
+ /* relock before exit */
+ mutex_enter(&ump->allocate_mutex);
+
+ if (to_trunc > num_lb)
+ return (0);
+ return (num_lb - to_trunc);
+}
+
+
+static void
+udf_sparsify_metadatapart(struct udf_mount *ump, uint32_t num_lb)
+{
+ /* NOT IMPLEMENTED, fail */
+}
+
+
+static void
+udf_collect_free_space_for_vpart(struct udf_mount *ump,
+ uint16_t vpart_num, uint32_t num_lb)
+{
+ /* allocate mutex is held */
+
+ /* only defined for metadata partitions */
+ if (ump->vtop_tp[ump->node_part] != UDF_VTOP_TYPE_META) {
+ DPRINTF(RESERVE, ("\tcan't grow/shrink; no metadata partitioning\n"));
+ return;
+ }
+
+ /* UDF 2.60 BD-R+POW? */
+ if (ump->vtop_alloc[ump->node_part] == UDF_ALLOC_METASEQUENTIAL) {
+ DPRINTF(RESERVE, ("\tUDF 2.60 BD-R+POW track grow not implemented yet\n"));
+ return;
+ }
+
+ if (ump->vtop_tp[vpart_num] == UDF_VTOP_TYPE_META) {
+ /* try to grow the meta partition */
+ DPRINTF(RESERVE, ("\ttrying to grow the meta partition\n"));
+ /* as per [UDF 2.60/2.2.13.5] : extend bitmap and metadata file(s) */
+ } else {
+ /* try to shrink the metadata partition */
+ DPRINTF(RESERVE, ("\ttrying to shrink the meta partition\n"));
+ /* as per [UDF 2.60/2.2.13.6] : either trunc or make sparse */
+ num_lb = udf_trunc_metadatapart(ump, num_lb);
+ if (num_lb)
+ udf_sparsify_metadatapart(ump, num_lb);
+ }
+
+ /* allocate mutex should still be held */
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Allocate a buf on disc for direct write out. The space doesn't have to be
+ * contiguous as the caller takes care of this.
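+ *
+ * The target partition follows from buf->b_udf_c_type via
+ * udf_get_record_vpart(); node descriptors on VAT-backed media are the
+ * exception and are allocated on the backing sequential data partition
+ * instead.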
+ */
+
+void
+udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf,
+ uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_nump)
+{
+ struct udf_node *udf_node = VTOI(buf->b_vp);
+ int lb_size, blks, udf_c_type;
+ int vpart_num, num_lb;
+ int error, s;
+
+ /*
+ * for each sector in the buf, allocate a sector on disc and record
+ * its position in the provided mapping array.
+ *
+ * If it's userdata or FIDs, record its location in its node.
+ */
+
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ num_lb = (buf->b_bcount + lb_size -1) / lb_size;
+ blks = lb_size / DEV_BSIZE;
+ udf_c_type = buf->b_udf_c_type;
+
+ KASSERT(lb_size == ump->discinfo.sector_size);
+
+ /* select partition to record the buffer on */
+ vpart_num = *vpart_nump = udf_get_record_vpart(ump, udf_c_type);
+
+ if (udf_c_type == UDF_C_NODE) {
+ /* if not VAT, it's already allocated */
+ if (ump->vtop_alloc[ump->node_part] != UDF_ALLOC_VAT)
+ return;
+
+ /* allocate on its backing sequential partition */
+ vpart_num = ump->data_part;
+ }
+
+ /* XXX can this still happen? */
+ /* do allocation on the selected partition */
+ error = udf_allocate_space(ump, udf_node, udf_c_type,
+ vpart_num, num_lb, lmapping);
+ if (error) {
+ /*
+ * ARGH! We haven't done our accounting right! It should
+ * always succeed.
+ */
+ panic("UDF disc allocation accounting gone wrong");
+ }
+
+ /* If it's userdata or FIDs, record its allocation in its node. */
+ if ((udf_c_type == UDF_C_USERDATA) ||
+ (udf_c_type == UDF_C_FIDS) ||
+ (udf_c_type == UDF_C_METADATA_SBM))
+ {
+ udf_record_allocation_in_node(ump, buf, vpart_num, lmapping,
+ node_ad_cpy);
+ /* decrement our outstanding bufs counter */
+ s = splbio();
+ udf_node->outstanding_bufs--;
+ splx(s);
+ }
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Try to merge a1 with the new piece a2. udf_ads_merge returns nonzero when
+ * merging is not (or no longer) possible; the unmerged remainder is left in a2.
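+ *
+ * Rough example, assuming a 2048 byte block size: merging an a1 of length
+ * 6144 with an adjacent a2 of length 4096 moves MIN(4096, max_len - 6144)
+ * bytes from a2 into a1 and advances a2's lb_num by merge_len / lb_size
+ * blocks, leaving a2 to describe only what did not fit.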
+ */
+
+static int
+udf_ads_merge(uint32_t lb_size, struct long_ad *a1, struct long_ad *a2)
+{
+ uint32_t max_len, merge_len;
+ uint32_t a1_len, a2_len;
+ uint32_t a1_flags, a2_flags;
+ uint32_t a1_lbnum, a2_lbnum;
+ uint16_t a1_part, a2_part;
+
+ max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
+
+ a1_flags = UDF_EXT_FLAGS(le32toh(a1->len));
+ a1_len = UDF_EXT_LEN(le32toh(a1->len));
+ a1_lbnum = le32toh(a1->loc.lb_num);
+ a1_part = le16toh(a1->loc.part_num);
+
+ a2_flags = UDF_EXT_FLAGS(le32toh(a2->len));
+ a2_len = UDF_EXT_LEN(le32toh(a2->len));
+ a2_lbnum = le32toh(a2->loc.lb_num);
+ a2_part = le16toh(a2->loc.part_num);
+
+ /* defines same space */
+ if (a1_flags != a2_flags)
+ return (1);
+
+ if (a1_flags != UDF_EXT_FREE) {
+ /* the same partition */
+ if (a1_part != a2_part)
+ return (1);
+
+ /* a2 is successor of a1 */
+ if (a1_lbnum * lb_size + a1_len != a2_lbnum * lb_size)
+ return (1);
+ }
+
+ /* merge as much from a2 as possible */
+ merge_len = MIN(a2_len, max_len - a1_len);
+ a1_len += merge_len;
+ a2_len -= merge_len;
+ a2_lbnum += merge_len/lb_size;
+
+ a1->len = le32toh(a1_len | a1_flags);
+ a2->len = le32toh(a2_len | a2_flags);
+ a2->loc.lb_num = le32toh(a2_lbnum);
+
+ if (a2_len > 0)
+ return (1);
+
+ /* a2 was merged completely */
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+static void
+udf_wipe_adslots(struct udf_node *udf_node)
+{
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct alloc_ext_entry *ext;
+ uint64_t inflen, objsize;
+ uint32_t lb_size, dscr_size, l_ea, l_ad, max_l_ad, crclen;
+ uint8_t *data_pos;
+ int extnr;
+
+ lb_size = le32toh(udf_node->ump->logical_vol->lb_size);
+
+ fe = udf_node->fe;
+ efe = udf_node->efe;
+ if (fe) {
+ inflen = le64toh(fe->inf_len);
+ objsize = inflen;
+ dscr_size = sizeof(struct file_entry) -1;
+ l_ea = le32toh(fe->l_ea);
+ l_ad = le32toh(fe->l_ad);
+ data_pos = (uint8_t *) fe + dscr_size + l_ea;
+ } else {
+ inflen = le64toh(efe->inf_len);
+ objsize = le64toh(efe->obj_size);
+ dscr_size = sizeof(struct extfile_entry) -1;
+ l_ea = le32toh(efe->l_ea);
+ l_ad = le32toh(efe->l_ad);
+ data_pos = (uint8_t *) efe + dscr_size + l_ea;
+ }
+ max_l_ad = lb_size - dscr_size - l_ea;
+
+ /* wipe fe/efe */
+ memset(data_pos, 0, max_l_ad);
+ crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea;
+ if (fe) {
+ fe->l_ad = le32toh(0);
+ fe->logblks_rec = le64toh(0);
+ fe->tag.desc_crc_len = le16toh(crclen);
+ } else {
+ efe->l_ad = le32toh(0);
+ efe->logblks_rec = le64toh(0);
+ efe->tag.desc_crc_len = le16toh(crclen);
+ }
+
+ /* wipe all allocation extent entries */
+ for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
+ ext = udf_node->ext[extnr];
+ dscr_size = sizeof(struct alloc_ext_entry) -1;
+ data_pos = (uint8_t *) ext->data;
+ max_l_ad = lb_size - dscr_size;
+ memset(data_pos, 0, max_l_ad);
+ ext->l_ad = le32toh(0);
+
+ crclen = dscr_size - UDF_DESC_TAG_LENGTH;
+ ext->tag.desc_crc_len = le16toh(crclen);
+ }
+ udf_node->i_flags |= IN_NODE_REBUILD;
+}
+#endif
+/* --------------------------------------------------------------------- */
+
+void
+udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb,
+ int *eof) {
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct alloc_ext_entry *ext;
+ struct icb_tag *icbtag;
+ struct short_ad *short_ad;
+ struct long_ad *long_ad, l_icb;
+ int icbflags, addr_type, adlen, extnr;
+ uint32_t offset, lb_size, dscr_size, l_ea, l_ad, flags;
+ uint8_t *data_pos;
+
+ /* determine what descriptor we are in */
+ lb_size = le32toh(udf_node->ump->logical_vol->lb_size);
+
+ fe = udf_node->fe;
+ efe = udf_node->efe;
+ if (fe) {
+ icbtag = &fe->icbtag;
+ dscr_size = sizeof(struct file_entry) -1;
+ l_ea = le32toh(fe->l_ea);
+ l_ad = le32toh(fe->l_ad);
+ data_pos = (uint8_t *) fe + dscr_size + l_ea;
+ } else {
+ icbtag = &efe->icbtag;
+ dscr_size = sizeof(struct extfile_entry) -1;
+ l_ea = le32toh(efe->l_ea);
+ l_ad = le32toh(efe->l_ad);
+ data_pos = (uint8_t *) efe + dscr_size + l_ea;
+ }
+
+ icbflags = le16toh(icbtag->flags);
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ /* just in case we're called on an intern, it's EOF */
+ if (addr_type == UDF_ICB_INTERN_ALLOC) {
+ memset(icb, 0, sizeof(struct long_ad));
+ *eof = 1;
+ return;
+ }
+
+ adlen = 0;
+ if (addr_type == UDF_ICB_SHORT_ALLOC) {
+ adlen = sizeof(struct short_ad);
+ } else if (addr_type == UDF_ICB_LONG_ALLOC) {
+ adlen = sizeof(struct long_ad);
+ }
+
+ /* if offset too big, we go to the allocation extensions */
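+ /*
+ * Slot `slot' lives at byte offset slot * adlen in the (E)FE's
+ * allocation area; once that offset runs past l_ad, the last
+ * descriptor there should be a UDF_EXT_REDIRECT pointing at the next
+ * allocation extent entry, and the walk continues in udf_node->ext[].
+ */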
+ offset = slot * adlen;
+ extnr = -1;
+ while (offset >= l_ad) {
+ /* check if our last entry is a redirect */
+ if (addr_type == UDF_ICB_SHORT_ALLOC) {
+ short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
+ l_icb.len = short_ad->len;
+ l_icb.loc.part_num = udf_node->loc.loc.part_num;
+ l_icb.loc.lb_num = short_ad->lb_num;
+ } else {
+ KASSERT(addr_type == UDF_ICB_LONG_ALLOC,("addr_type != UDF_ICB_LONG_ALLOC"));
+ long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
+ l_icb = *long_ad;
+ }
+ flags = UDF_EXT_FLAGS(le32toh(l_icb.len));
+ if (flags != UDF_EXT_REDIRECT) {
+ l_ad = 0; /* force EOF */
+ break;
+ }
+
+ /* advance to next extent */
+ extnr++;
+ if (extnr >= udf_node->num_extensions) {
+ l_ad = 0; /* force EOF */
+ break;
+ }
+ offset = offset - l_ad;
+ ext = udf_node->ext[extnr];
+ dscr_size = sizeof(struct alloc_ext_entry) -1;
+ l_ad = le32toh(ext->l_ad);
+ data_pos = (uint8_t *) ext + dscr_size;
+ }
+
+ /* XXX l_ad == 0 should be enough to check */
+ *eof = (offset >= l_ad) || (l_ad == 0);
+ if (*eof) {
+ DPRINTF(PARANOIDADWLK, ("returning EOF, extnr %d, offset %d, "
+ "l_ad %d\n", extnr, offset, l_ad));
+ memset(icb, 0, sizeof(struct long_ad));
+ return;
+ }
+
+ /* get the element */
+ if (addr_type == UDF_ICB_SHORT_ALLOC) {
+ short_ad = (struct short_ad *) (data_pos + offset);
+ icb->len = short_ad->len;
+ icb->loc.part_num = udf_node->loc.loc.part_num;
+ icb->loc.lb_num = short_ad->lb_num;
+ } else if (addr_type == UDF_ICB_LONG_ALLOC) {
+ long_ad = (struct long_ad *) (data_pos + offset);
+ *icb = *long_ad;
+ }
+ DPRINTF(PARANOIDADWLK, ("returning element : v %d, lb %d, len %d, "
+ "flags %d\n", icb->loc.part_num, icb->loc.lb_num,
+ UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
+}
+
+/* --------------------------------------------------------------------- */
+#if 0
+int
+udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb) {
+ struct udf_mount *ump = udf_node->ump;
+ union dscrptr *dscr, *extdscr;
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct alloc_ext_entry *ext;
+ struct icb_tag *icbtag;
+ struct short_ad *short_ad;
+ struct long_ad *long_ad, o_icb, l_icb;
+ uint64_t logblks_rec, *logblks_rec_p;
+ uint64_t lmapping;
+ uint32_t offset, rest, len, lb_num;
+ uint32_t lb_size, dscr_size, l_ea, l_ad, *l_ad_p, max_l_ad, crclen;
+ uint32_t flags;
+ uint16_t vpart_num;
+ uint8_t *data_pos;
+ int icbflags, addr_type, adlen, extnr;
+ int error;
+
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ vpart_num = le16toh(udf_node->loc.loc.part_num);
+
+ /* determine what descriptor we are in */
+ fe = udf_node->fe;
+ efe = udf_node->efe;
+ if (fe) {
+ icbtag = &fe->icbtag;
+ dscr = (union dscrptr *) fe;
+ dscr_size = sizeof(struct file_entry) -1;
+
+ l_ea = le32toh(fe->l_ea);
+ l_ad_p = &fe->l_ad;
+ logblks_rec_p = &fe->logblks_rec;
+ } else {
+ icbtag = &efe->icbtag;
+ dscr = (union dscrptr *) efe;
+ dscr_size = sizeof(struct extfile_entry) -1;
+
+ l_ea = le32toh(efe->l_ea);
+ l_ad_p = &efe->l_ad;
+ logblks_rec_p = &efe->logblks_rec;
+ }
+ data_pos = (uint8_t *) dscr + dscr_size + l_ea;
+ max_l_ad = lb_size - dscr_size - l_ea;
+
+ icbflags = le16toh(icbtag->flags);
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ /* just in case we're called on an intern, it's EOF */
+ if (addr_type == UDF_ICB_INTERN_ALLOC) {
+ panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
+ }
+
+ adlen = 0;
+ if (addr_type == UDF_ICB_SHORT_ALLOC) {
+ adlen = sizeof(struct short_ad);
+ } else if (addr_type == UDF_ICB_LONG_ALLOC) {
+ adlen = sizeof(struct long_ad);
+ }
+
+ /* clean up given long_ad since it can be a synthesized one */
+ flags = UDF_EXT_FLAGS(le32toh(icb->len));
+ if (flags == UDF_EXT_FREE) {
+ icb->loc.part_num = le16toh(0);
+ icb->loc.lb_num = le32toh(0);
+ }
+
+ /* if the offset is too big, we go to the allocation extensions */
+ l_ad = le32toh(*l_ad_p);
+ offset = (*slot) * adlen;
+ extnr = -1;
+ while (offset >= l_ad) {
+ /* check if our last entry is a redirect */
+ if (addr_type == UDF_ICB_SHORT_ALLOC) {
+ short_ad = (struct short_ad *) (data_pos + l_ad-adlen);
+ l_icb.len = short_ad->len;
+ l_icb.loc.part_num = udf_node->loc.loc.part_num;
+ l_icb.loc.lb_num = short_ad->lb_num;
+ } else {
+ KASSERT(addr_type == UDF_ICB_LONG_ALLOC);
+ long_ad = (struct long_ad *) (data_pos + l_ad-adlen);
+ l_icb = *long_ad;
+ }
+ flags = UDF_EXT_FLAGS(le32toh(l_icb.len));
+ if (flags != UDF_EXT_REDIRECT) {
+ /* only one past the last one is addressable */
+ break;
+ }
+
+ /* advance to next extent */
+ extnr++;
+ KASSERT(extnr < udf_node->num_extensions);
+ offset = offset - l_ad;
+
+ ext = udf_node->ext[extnr];
+ dscr = (union dscrptr *) ext;
+ dscr_size = sizeof(struct alloc_ext_entry) -1;
+ max_l_ad = lb_size - dscr_size;
+ l_ad_p = &ext->l_ad;
+ l_ad = le32toh(*l_ad_p);
+ data_pos = (uint8_t *) ext + dscr_size;
+ }
+ DPRINTF(PARANOIDADWLK, ("append, ext %d, offset %d, l_ad %d\n",
+ extnr, offset, le32toh(*l_ad_p)));
+ KASSERT(l_ad == le32toh(*l_ad_p));
+
+ /* offset is offset within the current (E)FE/AED */
+ l_ad = le32toh(*l_ad_p);
+ crclen = le16toh(dscr->tag.desc_crc_len);
+ logblks_rec = le64toh(*logblks_rec_p);
+
+ /* overwriting old piece? */
+ if (offset < l_ad) {
+ /* overwrite entry; compensate for the old element */
+ if (addr_type == UDF_ICB_SHORT_ALLOC) {
+ short_ad = (struct short_ad *) (data_pos + offset);
+ o_icb.len = short_ad->len;
+ o_icb.loc.part_num = le16toh(0); /* ignore */
+ o_icb.loc.lb_num = short_ad->lb_num;
+ } else if (addr_type == UDF_ICB_LONG_ALLOC) {
+ long_ad = (struct long_ad *) (data_pos + offset);
+ o_icb = *long_ad;
+ } else {
+ panic("Invalid address type in udf_append_adslot\n");
+ }
+
+ len = le32toh(o_icb.len);
+ if (UDF_EXT_FLAGS(len) == UDF_EXT_ALLOCATED) {
+ /* adjust counts */
+ len = UDF_EXT_LEN(len);
+ logblks_rec -= (len + lb_size -1) / lb_size;
+ }
+ }
+
+ /* check if we're not appending a redirection */
+ flags = UDF_EXT_FLAGS(le32toh(icb->len));
+ KASSERT(flags != UDF_EXT_REDIRECT);
+
+ /* round down available space */
+ rest = adlen * ((max_l_ad - offset) / adlen);
+ if (rest <= adlen) {
+ /* have to append aed, see if we already have a spare one */
+ extnr++;
+ ext = udf_node->ext[extnr];
+ l_icb = udf_node->ext_loc[extnr];
+ if (ext == NULL) {
+ DPRINTF(ALLOC,("adding allocation extent %d\n", extnr));
+
+ error = udf_reserve_space(ump, NULL, UDF_C_NODE,
+ vpart_num, 1, /* can fail */ false);
+ if (error) {
+ printf("UDF: couldn't reserve space for AED!\n");
+ return error;
+ }
+ error = udf_allocate_space(ump, NULL, UDF_C_NODE,
+ vpart_num, 1, &lmapping);
+ lb_num = lmapping;
+ if (error)
+ panic("UDF: couldn't allocate AED!\n");
+
+ /* initialise pointer to location */
+ memset(&l_icb, 0, sizeof(struct long_ad));
+ l_icb.len = le32toh(lb_size | UDF_EXT_REDIRECT);
+ l_icb.loc.lb_num = le32toh(lb_num);
+ l_icb.loc.part_num = le16toh(vpart_num);
+
+ /* create new aed descriptor */
+ udf_create_logvol_dscr(ump, udf_node, &l_icb, &extdscr);
+ ext = &extdscr->aee;
+
+ udf_inittag(ump, &ext->tag, TAGID_ALLOCEXTENT, lb_num);
+ dscr_size = sizeof(struct alloc_ext_entry) -1;
+ max_l_ad = lb_size - dscr_size;
+ memset(ext->data, 0, max_l_ad);
+ ext->l_ad = le32toh(0);
+ ext->tag.desc_crc_len =
+ le16toh(dscr_size - UDF_DESC_TAG_LENGTH);
+
+ /* declare aed */
+ udf_node->num_extensions++;
+ udf_node->ext_loc[extnr] = l_icb;
+ udf_node->ext[extnr] = ext;
+ }
+ /* add redirect and adjust l_ad and crclen for old descr */
+ if (addr_type == UDF_ICB_SHORT_ALLOC) {
+ short_ad = (struct short_ad *) (data_pos + offset);
+ short_ad->len = l_icb.len;
+ short_ad->lb_num = l_icb.loc.lb_num;
+ } else if (addr_type == UDF_ICB_LONG_ALLOC) {
+ long_ad = (struct long_ad *) (data_pos + offset);
+ *long_ad = l_icb;
+ }
+ l_ad += adlen;
+ crclen += adlen;
+ dscr->tag.desc_crc_len = le16toh(crclen);
+ *l_ad_p = le32toh(l_ad);
+
+ /* advance to the new extension */
+ KASSERT(ext != NULL);
+ dscr = (union dscrptr *) ext;
+ dscr_size = sizeof(struct alloc_ext_entry) -1;
+ max_l_ad = lb_size - dscr_size;
+ data_pos = (uint8_t *) dscr + dscr_size;
+
+ l_ad_p = &ext->l_ad;
+ l_ad = le32toh(*l_ad_p);
+ crclen = le16toh(dscr->tag.desc_crc_len);
+ offset = 0;
+
+ /* adjust callee's slot count for link insert */
+ *slot += 1;
+ }
+
+ /* write out the element */
+ DPRINTF(PARANOIDADWLK, ("adding element : %p : v %d, lb %d, "
+ "len %d, flags %d\n", data_pos + offset,
+ icb->loc.part_num, icb->loc.lb_num,
+ UDF_EXT_LEN(icb->len), UDF_EXT_FLAGS(icb->len)));
+ if (addr_type == UDF_ICB_SHORT_ALLOC) {
+ short_ad = (struct short_ad *) (data_pos + offset);
+ short_ad->len = icb->len;
+ short_ad->lb_num = icb->loc.lb_num;
+ } else if (addr_type == UDF_ICB_LONG_ALLOC) {
+ long_ad = (struct long_ad *) (data_pos + offset);
+ *long_ad = *icb;
+ }
+
+ /* adjust logblks recorded count */
+ len = le32toh(icb->len);
+ flags = UDF_EXT_FLAGS(len);
+ if (flags == UDF_EXT_ALLOCATED)
+ logblks_rec += (UDF_EXT_LEN(len) + lb_size -1) / lb_size;
+ *logblks_rec_p = le64toh(logblks_rec);
+
+ /* adjust l_ad and crclen when needed */
+ if (offset >= l_ad) {
+ l_ad += adlen;
+ crclen += adlen;
+ dscr->tag.desc_crc_len = le16toh(crclen);
+ *l_ad_p = le32toh(l_ad);
+ }
+
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
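+/*
+ * Count the allocation extents still referenced from the node's allocation
+ * descriptors and free any trailing, unused allocation extent descriptors
+ * together with their disc space.
+ */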
+static void
+udf_count_alloc_exts(struct udf_node *udf_node)
+{
+ struct long_ad s_ad;
+ uint32_t lb_num, len, flags;
+ uint16_t vpart_num;
+ int slot, eof;
+ int num_extents, extnr;
+ int lb_size;
+
+ if (udf_node->num_extensions == 0)
+ return;
+
+ lb_size = le32toh(udf_node->ump->logical_vol->lb_size);
+ /* count number of allocation extents in use */
+ num_extents = 0;
+ slot = 0;
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &s_ad, &eof);
+ if (eof)
+ break;
+ len = le32toh(s_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+
+ if (flags == UDF_EXT_REDIRECT)
+ num_extents++;
+
+ slot++;
+ }
+
+ DPRINTF(ALLOC, ("udf_count_alloc_ext counted %d live extents\n",
+ num_extents));
+
+ /* XXX choice: we could delay freeing them on node writeout */
+ /* free excess entries */
+ extnr = num_extents;
+ for (;extnr < udf_node->num_extensions; extnr++) {
+ DPRINTF(ALLOC, ("freeing alloc ext %d\n", extnr));
+ /* free descriptor */
+ s_ad = udf_node->ext_loc[extnr];
+ udf_free_logvol_dscr(udf_node->ump, &s_ad,
+ udf_node->ext[extnr]);
+ udf_node->ext[extnr] = NULL;
+
+ /* free disc space */
+ lb_num = le32toh(s_ad.loc.lb_num);
+ vpart_num = le16toh(s_ad.loc.part_num);
+ udf_free_allocated_space(udf_node->ump, lb_num, vpart_num, 1);
+
+ memset(&udf_node->ext_loc[extnr], 0, sizeof(struct long_ad));
+ }
+
+ /* set our new number of allocation extents */
+ udf_node->num_extensions = num_extents;
+}
+
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Adjust the node's allocation descriptors to reflect the new mapping; note
+ * that we might glue to existing allocation descriptors.
+ *
+ * XXX Note there can only be one allocation being recorded per mount; maybe
+ * do explicit allocation in the schedule thread?
+ */
+
+static void
+udf_record_allocation_in_node(struct udf_mount *ump, struct buf *buf,
+ uint16_t vpart_num, uint64_t *mapping, struct long_ad *node_ad_cpy)
+{
+ struct vnode *vp = buf->b_vp;
+ struct udf_node *udf_node = VTOI(vp);
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct icb_tag *icbtag;
+ struct long_ad s_ad, c_ad;
+ uint64_t inflen, from, till;
+ uint64_t foffset, end_foffset, restart_foffset;
+ uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
+ uint32_t num_lb, len, flags, lb_num;
+ uint32_t run_start;
+ uint32_t slot_offset, replace_len, replace;
+ int addr_type, icbflags;
+/* int udf_c_type = buf->b_udf_c_type; */
+ int lb_size, run_length, eof;
+ int slot, cpy_slot, cpy_slots, restart_slot;
+ int error;
+
+ DPRINTF(ALLOC, ("udf_record_allocation_in_node\n"));
+
+#if 0
+ /* XXX disable sanity check for now */
+ /* sanity check ... should be panic ? */
+ if ((udf_c_type != UDF_C_USERDATA) && (udf_c_type != UDF_C_FIDS))
+ return;
+#endif
+
+ lb_size = le32toh(udf_node->ump->logical_vol->lb_size);
+
+ /* do the job */
+ UDF_LOCK_NODE(udf_node, 0); /* XXX can deadlock ? */
+ udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
+
+ fe = udf_node->fe;
+ efe = udf_node->efe;
+ if (fe) {
+ icbtag = &fe->icbtag;
+ inflen = le64toh(fe->inf_len);
+ } else {
+ icbtag = &efe->icbtag;
+ inflen = le64toh(efe->inf_len);
+ }
+
+ /* make sure `till' is not past the file information length */
+ from = buf->b_lblkno * lb_size;
+ till = MIN(inflen, from + buf->b_resid);
+
+ num_lb = (till - from + lb_size -1) / lb_size;
+
+ DPRINTF(ALLOC, ("record allocation from %"PRIu64" + %d\n", from, buf->b_bcount));
+
+ icbflags = le16toh(icbtag->flags);
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ if (addr_type == UDF_ICB_INTERN_ALLOC) {
+ /* nothing to do */
+ /* XXX clean up rest of node? just in case? */
+ UDF_UNLOCK_NODE(udf_node, 0);
+ return;
+ }
+
+ slot = 0;
+ cpy_slot = 0;
+ foffset = 0;
+
+ /* 1) copy till first overlap piece to the rewrite buffer */
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &s_ad, &eof);
+ if (eof) {
+ DPRINTF(WRITE,
+ ("Record allocation in node "
+ "failed: encountered EOF\n"));
+ UDF_UNLOCK_NODE(udf_node, 0);
+ buf->b_error = EINVAL;
+ return;
+ }
+ len = le32toh(s_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ if (flags == UDF_EXT_REDIRECT) {
+ slot++;
+ continue;
+ }
+
+ end_foffset = foffset + len;
+ if (end_foffset > from)
+ break; /* found */
+
+ node_ad_cpy[cpy_slot++] = s_ad;
+
+ DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
+ "-> stack\n",
+ le16toh(s_ad.loc.part_num),
+ le32toh(s_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ UDF_EXT_FLAGS(le32toh(s_ad.len)) >> 30));
+
+ foffset = end_foffset;
+ slot++;
+ }
+ restart_slot = slot;
+ restart_foffset = foffset;
+
+ /* 2) trunc overlapping slot at overlap and copy it */
+ slot_offset = from - foffset;
+ if (slot_offset > 0) {
+ DPRINTF(ALLOC, ("\tslot_offset = %d, flags = %d (%d)\n",
+ slot_offset, flags >> 30, flags));
+
+ s_ad.len = le32toh(slot_offset | flags);
+ node_ad_cpy[cpy_slot++] = s_ad;
+
+ DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
+ "-> stack\n",
+ le16toh(s_ad.loc.part_num),
+ le32toh(s_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ UDF_EXT_FLAGS(le32toh(s_ad.len)) >> 30));
+ }
+ foffset += slot_offset;
+
+ /* 3) insert new mappings */
+ memset(&s_ad, 0, sizeof(struct long_ad));
+ lb_num = 0;
+ for (lb_num = 0; lb_num < num_lb; lb_num++) {
+ run_start = mapping[lb_num];
+ run_length = 1;
+ while (lb_num < num_lb-1) {
+ if (mapping[lb_num+1] != mapping[lb_num]+1)
+ if (mapping[lb_num+1] != mapping[lb_num])
+ break;
+ run_length++;
+ lb_num++;
+ }
+ /* insert slot for this mapping */
+ len = run_length * lb_size;
+
+ /* bounds checking */
+ if (foffset + len > till)
+ len = till - foffset;
+ KASSERT(foffset + len <= inflen);
+
+ s_ad.len = le32toh(len | UDF_EXT_ALLOCATED);
+ s_ad.loc.part_num = le16toh(vpart_num);
+ s_ad.loc.lb_num = le32toh(run_start);
+
+ foffset += len;
+
+ /* paranoia */
+ if (len == 0) {
+ DPRINTF(WRITE,
+ ("Record allocation in node "
+ "failed: insert failed\n"));
+ UDF_UNLOCK_NODE(udf_node, 0);
+ buf->b_error = EINVAL;
+ return;
+ }
+ node_ad_cpy[cpy_slot++] = s_ad;
+
+ DPRINTF(ALLOC, ("\t3: insert new mapping vp %d lb %d, len %d, "
+ "flags %d -> stack\n",
+ le16toh(s_ad.loc.part_num), le32toh(s_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ UDF_EXT_FLAGS(le32toh(s_ad.len)) >> 30));
+ }
+
+ /* 4) pop replaced length */
+ slot = restart_slot;
+ foffset = restart_foffset;
+
+ replace_len = till - foffset; /* total amount of bytes to pop */
+ slot_offset = from - foffset; /* offset in first encountered slot */
+ KASSERT((slot_offset % lb_size) == 0);
+
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &s_ad, &eof);
+ if (eof)
+ break;
+
+ len = le32toh(s_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+ lb_num = le32toh(s_ad.loc.lb_num);
+
+ if (flags == UDF_EXT_REDIRECT) {
+ slot++;
+ continue;
+ }
+
+ DPRINTF(ALLOC, ("\t4i: got slot %d, slot_offset %d, "
+ "replace_len %d, "
+ "vp %d, lb %d, len %d, flags %d\n",
+ slot, slot_offset, replace_len,
+ le16toh(s_ad.loc.part_num),
+ le32toh(s_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ UDF_EXT_FLAGS(le32toh(s_ad.len)) >> 30));
+
+ /* adjust for slot offset */
+ if (slot_offset) {
+ DPRINTF(ALLOC, ("\t4s: skipping %d\n", slot_offset));
+ lb_num += slot_offset / lb_size;
+ len -= slot_offset;
+ foffset += slot_offset;
+ replace_len -= slot_offset;
+
+ /* mark adjusted */
+ slot_offset = 0;
+ }
+
+ /* advance for (the rest of) this slot */
+ replace = MIN(len, replace_len);
+ DPRINTF(ALLOC, ("\t4d: replacing %d\n", replace));
+
+ /* advance for this slot */
+ if (replace) {
+ /* note: don't round down num_lb since we would
+ * then forget the last partial one */
+ num_lb = (replace + lb_size - 1) / lb_size;
+ if (flags != UDF_EXT_FREE) {
+ udf_free_allocated_space(ump, lb_num,
+ le16toh(s_ad.loc.part_num), num_lb);
+ }
+ lb_num += num_lb;
+ len -= replace;
+ foffset += replace;
+ replace_len -= replace;
+ }
+
+ /* do we have a slot tail ? */
+ if (len) {
+ KASSERT(foffset % lb_size == 0);
+
+ /* we arrived at our point, push remainder */
+ s_ad.len = le32toh(len | flags);
+ s_ad.loc.lb_num = le32toh(lb_num);
+ if (flags == UDF_EXT_FREE)
+ s_ad.loc.lb_num = le32toh(0);
+ node_ad_cpy[cpy_slot++] = s_ad;
+ foffset += len;
+ slot++;
+
+ DPRINTF(ALLOC, ("\t4: vp %d, lb %d, len %d, flags %d "
+ "-> stack\n",
+ le16toh(s_ad.loc.part_num),
+ le32toh(s_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ UDF_EXT_FLAGS(le32toh(s_ad.len)) >> 30));
+ break;
+ }
+
+ slot++;
+ }
+
+ /* 5) copy remainder */
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &s_ad, &eof);
+ if (eof)
+ break;
+
+ len = le32toh(s_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ if (flags == UDF_EXT_REDIRECT) {
+ slot++;
+ continue;
+ }
+
+ node_ad_cpy[cpy_slot++] = s_ad;
+
+ DPRINTF(ALLOC, ("\t5: insert new mapping "
+ "vp %d lb %d, len %d, flags %d "
+ "-> stack\n",
+ le16toh(s_ad.loc.part_num),
+ le32toh(s_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ UDF_EXT_FLAGS(le32toh(s_ad.len)) >> 30));
+
+ slot++;
+ }
+
+ /* 6) reset node descriptors */
+ udf_wipe_adslots(udf_node);
+
+ /* 7) copy back extents; merge when possible. Recounting on the fly */
+ cpy_slots = cpy_slot;
+
+ c_ad = node_ad_cpy[0];
+ slot = 0;
+ DPRINTF(ALLOC, ("\t7s: stack -> got mapping vp %d "
+ "lb %d, len %d, flags %d\n",
+ le16toh(c_ad.loc.part_num),
+ le32toh(c_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(c_ad.len)),
+ UDF_EXT_FLAGS(le32toh(c_ad.len)) >> 30));
+
+ for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
+ s_ad = node_ad_cpy[cpy_slot];
+
+ DPRINTF(ALLOC, ("\t7i: stack -> got mapping vp %d "
+ "lb %d, len %d, flags %d\n",
+ le16toh(s_ad.loc.part_num),
+ le32toh(s_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ UDF_EXT_FLAGS(le32toh(s_ad.len)) >> 30));
+
+ /* see if we can merge */
+ if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
+ /* not mergeable (anymore) */
+ DPRINTF(ALLOC, ("\t7: appending vp %d lb %d, "
+ "len %d, flags %d\n",
+ le16toh(c_ad.loc.part_num),
+ le32toh(c_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(c_ad.len)),
+ UDF_EXT_FLAGS(le32toh(c_ad.len)) >> 30));
+
+ error = udf_append_adslot(udf_node, &slot, &c_ad);
+ if (error) {
+ buf->b_error = error;
+ goto out;
+ }
+ c_ad = s_ad;
+ slot++;
+ }
+ }
+
+ /* 8) push rest slot (if any) */
+ if (UDF_EXT_LEN(c_ad.len) > 0) {
+ DPRINTF(ALLOC, ("\t8: last append vp %d lb %d, "
+ "len %d, flags %d\n",
+ le16toh(c_ad.loc.part_num),
+ le32toh(c_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(c_ad.len)),
+ UDF_EXT_FLAGS(le32toh(c_ad.len)) >> 30));
+
+ error = udf_append_adslot(udf_node, &slot, &c_ad);
+ if (error) {
+ buf->b_error = error;
+ goto out;
+ }
+ }
+
+out:
+ udf_count_alloc_exts(udf_node);
+
+ /* the node's descriptors should now be sane */
+ udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
+ UDF_UNLOCK_NODE(udf_node, 0);
+
+ KASSERT(orig_inflen == new_inflen);
+ KASSERT(new_lbrec >= orig_lbrec);
+
+ return;
+}
+
+/* --------------------------------------------------------------------- */
+
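+/*
+ * Grow the node to `new_size'. Internally allocated nodes are extended in
+ * place when the data still fits in the descriptor; otherwise free
+ * (unallocated) extents are appended in chunks of at most `max_len' bytes,
+ * converting from internal allocation first when needed.
+ */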
+int
+udf_grow_node(struct udf_node *udf_node, uint64_t new_size)
+{
+ union dscrptr *dscr;
+ struct vnode *vp = udf_node->vnode;
+ struct udf_mount *ump = udf_node->ump;
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct icb_tag *icbtag;
+ struct long_ad c_ad, s_ad;
+ uint64_t size_diff, old_size, inflen, objsize, chunk, append_len;
+ uint64_t foffset, end_foffset;
+ uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
+ uint32_t lb_size, dscr_size, crclen, lastblock_grow;
+ uint32_t icbflags, len, flags, max_len;
+ uint32_t max_l_ad, l_ad, l_ea;
+ uint16_t my_part, dst_part;
+ uint8_t *data_pos, *evacuated_data;
+ int addr_type;
+ int slot, cpy_slot;
+ int eof, error;
+
+ DPRINTF(ALLOC, ("udf_grow_node\n"));
+
+ UDF_LOCK_NODE(udf_node, 0);
+ udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
+
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
+
+ fe = udf_node->fe;
+ efe = udf_node->efe;
+ if (fe) {
+ dscr = (union dscrptr *) fe;
+ icbtag = &fe->icbtag;
+ inflen = le64toh(fe->inf_len);
+ objsize = inflen;
+ dscr_size = sizeof(struct file_entry) -1;
+ l_ea = le32toh(fe->l_ea);
+ l_ad = le32toh(fe->l_ad);
+ } else {
+ dscr = (union dscrptr *) efe;
+ icbtag = &efe->icbtag;
+ inflen = le64toh(efe->inf_len);
+ objsize = le64toh(efe->obj_size);
+ dscr_size = sizeof(struct extfile_entry) -1;
+ l_ea = le32toh(efe->l_ea);
+ l_ad = le32toh(efe->l_ad);
+ }
+ data_pos = (uint8_t *) dscr + dscr_size + l_ea;
+ max_l_ad = lb_size - dscr_size - l_ea;
+
+ icbflags = le16toh(icbtag->flags);
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ old_size = inflen;
+ size_diff = new_size - old_size;
+
+ DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
+
+ evacuated_data = NULL;
+ if (addr_type == UDF_ICB_INTERN_ALLOC) {
+ if (l_ad + size_diff <= max_l_ad) {
+ /* only reflect size change directly in the node */
+ inflen += size_diff;
+ objsize += size_diff;
+ l_ad += size_diff;
+ crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
+ if (fe) {
+ fe->inf_len = le64toh(inflen);
+ fe->l_ad = le32toh(l_ad);
+ fe->tag.desc_crc_len = le16toh(crclen);
+ } else {
+ efe->inf_len = le64toh(inflen);
+ efe->obj_size = le64toh(objsize);
+ efe->l_ad = le32toh(l_ad);
+ efe->tag.desc_crc_len = le16toh(crclen);
+ }
+ error = 0;
+
+ /* set new size for uvm */
+ uvm_vnp_setsize(vp, old_size);
+ uvm_vnp_setwritesize(vp, new_size);
+
+#if 0
+ /* zero append space in buffer */
+ uvm_vnp_zerorange(vp, old_size, new_size - old_size);
+#endif
+
+ udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
+
+ /* unlock */
+ UDF_UNLOCK_NODE(udf_node, 0);
+
+ KASSERT(new_inflen == orig_inflen + size_diff);
+ KASSERT(new_lbrec == orig_lbrec);
+ KASSERT(new_lbrec == 0);
+ return (0);
+ }
+
+ DPRINTF(ALLOC, ("\tCONVERT from internal\n"));
+
+ if (old_size > 0) {
+ /* allocate some space and copy in the stuff to keep */
+ evacuated_data = malloc(lb_size, M_UDFTEMP, M_WAITOK);
+ memset(evacuated_data, 0, lb_size);
+
+ /* node is locked, so safe to exit mutex */
+ UDF_UNLOCK_NODE(udf_node, 0);
+
+ /* read in using the `normal' vn_rdwr() */
+ error = vn_rdwr(UIO_READ, udf_node->vnode,
+ evacuated_data, old_size, 0,
+ UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
+ FSCRED, NULL, NULL);
+
+ /* enter again */
+ UDF_LOCK_NODE(udf_node, 0);
+ }
+
+ /* convert to a normal alloc and select type */
+ my_part = le16toh(udf_node->loc.loc.part_num);
+ dst_part = udf_get_record_vpart(ump, udf_get_c_type(udf_node));
+ addr_type = UDF_ICB_SHORT_ALLOC;
+ if (dst_part != my_part)
+ addr_type = UDF_ICB_LONG_ALLOC;
+
+ icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+ icbflags |= addr_type;
+ icbtag->flags = le16toh(icbflags);
+
+ /* wipe old descriptor space */
+ udf_wipe_adslots(udf_node);
+
+ memset(&c_ad, 0, sizeof(struct long_ad));
+ c_ad.len = le32toh(old_size | UDF_EXT_FREE);
+ c_ad.loc.part_num = le16toh(0); /* not relevant */
+ c_ad.loc.lb_num = le32toh(0); /* not relevant */
+
+ slot = 0;
+ } else {
+ /* go to the last entry (if any) */
+ slot = 0;
+ cpy_slot = 0;
+ foffset = 0;
+ memset(&c_ad, 0, sizeof(struct long_ad));
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &c_ad, &eof);
+ if (eof)
+ break;
+
+ len = le32toh(c_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ end_foffset = foffset + len;
+ if (flags != UDF_EXT_REDIRECT)
+ foffset = end_foffset;
+
+ slot++;
+ }
+ /* at end of adslots */
+
+ /* special case: if the old size was zero, there is no last slot */
+ if (old_size == 0) {
+ c_ad.len = le32toh(0 | UDF_EXT_FREE);
+ c_ad.loc.part_num = le16toh(0); /* not relevant */
+ c_ad.loc.lb_num = le32toh(0); /* not relevant */
+ } else {
+ /* refetch last slot */
+ slot--;
+ udf_get_adslot(udf_node, slot, &c_ad, &eof);
+ }
+ }
+
+ /*
+ * If the length of the last slot is not a multiple of lb_size, adjust the
+ * length so that it is; don't forget to adjust `append_len'. This is
+ * relevant when extending existing files.
+ */
+ len = le32toh(c_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ lastblock_grow = 0;
+ if (len % lb_size > 0) {
+ lastblock_grow = lb_size - (len % lb_size);
+ lastblock_grow = MIN(size_diff, lastblock_grow);
+ len += lastblock_grow;
+ c_ad.len = le32toh(len | flags);
+
+ /* TODO zero appended space in buffer! */
+ /* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
+ }
+ memset(&s_ad, 0, sizeof(struct long_ad));
+
+ /* size_diff can be bigger than allowed, so grow in chunks */
+ append_len = size_diff - lastblock_grow;
+ while (append_len > 0) {
+ chunk = MIN(append_len, max_len);
+ s_ad.len = le32toh(chunk | UDF_EXT_FREE);
+ s_ad.loc.part_num = le16toh(0);
+ s_ad.loc.lb_num = le32toh(0);
+
+ if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
+ /* not mergeable (anymore) */
+ error = udf_append_adslot(udf_node, &slot, &c_ad);
+ if (error)
+ goto errorout;
+ slot++;
+ c_ad = s_ad;
+ memset(&s_ad, 0, sizeof(struct long_ad));
+ }
+ append_len -= chunk;
+ }
+
+ /* if there is a rest piece in the accumulator, append it */
+ if (UDF_EXT_LEN(le32toh(c_ad.len)) > 0) {
+ error = udf_append_adslot(udf_node, &slot, &c_ad);
+ if (error)
+ goto errorout;
+ slot++;
+ }
+
+ /* if there is a rest piece that didn't fit, append it */
+ if (UDF_EXT_LEN(le32toh(s_ad.len)) > 0) {
+ error = udf_append_adslot(udf_node, &slot, &s_ad);
+ if (error)
+ goto errorout;
+ slot++;
+ }
+
+ inflen += size_diff;
+ objsize += size_diff;
+ if (fe) {
+ fe->inf_len = le64toh(inflen);
+ } else {
+ efe->inf_len = le64toh(inflen);
+ efe->obj_size = le64toh(objsize);
+ }
+ error = 0;
+
+ if (evacuated_data) {
+ /* set new write size for uvm */
+ uvm_vnp_setwritesize(vp, old_size);
+
+ /* write out evacuated data */
+ error = vn_rdwr(UIO_WRITE, udf_node->vnode,
+ evacuated_data, old_size, 0,
+ UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
+ FSCRED, NULL, NULL);
+ uvm_vnp_setsize(vp, old_size);
+ }
+
+errorout:
+ if (evacuated_data)
+ free(evacuated_data, M_UDFTEMP);
+
+ udf_count_alloc_exts(udf_node);
+
+ udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
+ UDF_UNLOCK_NODE(udf_node, 0);
+
+ KASSERT(new_inflen == orig_inflen + size_diff);
+ KASSERT(new_lbrec == orig_lbrec);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
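+/*
+ * Shrink the node to `new_size': free the allocations past the new size,
+ * truncate the overlapping extent and, currently only when shrinking to
+ * zero, convert back to internal allocation.
+ */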
+int
+udf_shrink_node(struct udf_node *udf_node, uint64_t new_size)
+{
+ struct vnode *vp = udf_node->vnode;
+ struct udf_mount *ump = udf_node->ump;
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct icb_tag *icbtag;
+ struct long_ad c_ad, s_ad, *node_ad_cpy;
+ uint64_t size_diff, old_size, inflen, objsize;
+ uint64_t foffset, end_foffset;
+ uint64_t orig_inflen, orig_lbrec, new_inflen, new_lbrec;
+ uint32_t lb_size, dscr_size, crclen;
+ uint32_t slot_offset, slot_offset_lb;
+ uint32_t len, flags, max_len;
+ uint32_t num_lb, lb_num;
+ uint32_t max_l_ad, l_ad, l_ea;
+ uint16_t vpart_num;
+ uint8_t *data_pos;
+ int icbflags, addr_type;
+ int slot, cpy_slot, cpy_slots;
+ int eof, error;
+
+ DPRINTF(ALLOC, ("udf_shrink_node\n"));
+
+ UDF_LOCK_NODE(udf_node, 0);
+ udf_node_sanity_check(udf_node, &orig_inflen, &orig_lbrec);
+
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ max_len = ((UDF_EXT_MAXLEN / lb_size) * lb_size);
+
+ /* do the work */
+ fe = udf_node->fe;
+ efe = udf_node->efe;
+ if (fe) {
+ icbtag = &fe->icbtag;
+ inflen = le64toh(fe->inf_len);
+ objsize = inflen;
+ dscr_size = sizeof(struct file_entry) -1;
+ l_ea = le32toh(fe->l_ea);
+ l_ad = le32toh(fe->l_ad);
+ data_pos = (uint8_t *) fe + dscr_size + l_ea;
+ } else {
+ icbtag = &efe->icbtag;
+ inflen = le64toh(efe->inf_len);
+ objsize = le64toh(efe->obj_size);
+ dscr_size = sizeof(struct extfile_entry) -1;
+ l_ea = le32toh(efe->l_ea);
+ l_ad = le32toh(efe->l_ad);
+ data_pos = (uint8_t *) efe + dscr_size + l_ea;
+ }
+ max_l_ad = lb_size - dscr_size - l_ea;
+
+ icbflags = le16toh(icbtag->flags);
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ old_size = inflen;
+ size_diff = old_size - new_size;
+
+ DPRINTF(ALLOC, ("\tfrom %"PRIu64" to %"PRIu64"\n", old_size, new_size));
+
+ /* shrink the node to its new size */
+ if (addr_type == UDF_ICB_INTERN_ALLOC) {
+ /* only reflect size change directly in the node */
+ KASSERT(new_size <= max_l_ad);
+ inflen -= size_diff;
+ objsize -= size_diff;
+ l_ad -= size_diff;
+ crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
+ if (fe) {
+ fe->inf_len = le64toh(inflen);
+ fe->l_ad = le32toh(l_ad);
+ fe->tag.desc_crc_len = le16toh(crclen);
+ } else {
+ efe->inf_len = le64toh(inflen);
+ efe->obj_size = le64toh(objsize);
+ efe->l_ad = le32toh(l_ad);
+ efe->tag.desc_crc_len = le16toh(crclen);
+ }
+ error = 0;
+
+ /* clear the space in the descriptor */
+ KASSERT(old_size > new_size);
+ memset(data_pos + new_size, 0, old_size - new_size);
+
+ /* TODO zero appended space in buffer! */
+ /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
+
+ /* set new size for uvm */
+ uvm_vnp_setsize(vp, new_size);
+
+ udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
+ UDF_UNLOCK_NODE(udf_node, 0);
+
+ KASSERT(new_inflen == orig_inflen - size_diff);
+ KASSERT(new_lbrec == orig_lbrec);
+ KASSERT(new_lbrec == 0);
+
+ return (0);
+ }
+
+ /* setup node cleanup extents copy space */
+ node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
+ M_UDFTEMP, M_WAITOK);
+ memset(node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
+
+ /*
+ * Shrink the node by releasing the allocations and truncate the last
+ * allocation to the new size. If the new size fits into the
+ * allocation descriptor itself, transform it into an
+ * UDF_ICB_INTERN_ALLOC.
+ */
+ slot = 0;
+ cpy_slot = 0;
+ foffset = 0;
+
+ /* 1) copy till first overlap piece to the rewrite buffer */
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &s_ad, &eof);
+ if (eof) {
+ DPRINTF(WRITE,
+ ("Shrink node failed: "
+ "encountered EOF\n"));
+ error = EINVAL;
+ goto errorout; /* panic? */
+ }
+ len = le32toh(s_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ if (flags == UDF_EXT_REDIRECT) {
+ slot++;
+ continue;
+ }
+
+ end_foffset = foffset + len;
+ if (end_foffset > new_size)
+ break; /* found */
+
+ node_ad_cpy[cpy_slot++] = s_ad;
+
+ DPRINTF(ALLOC, ("\t1: vp %d, lb %d, len %d, flags %d "
+ "-> stack\n",
+ le16toh(s_ad.loc.part_num),
+ le32toh(s_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ UDF_EXT_FLAGS(le32toh(s_ad.len)) >> 30));
+
+ foffset = end_foffset;
+ slot++;
+ }
+ slot_offset = new_size - foffset;
+
+ /* 2) trunc overlapping slot at overlap and copy it */
+ if (slot_offset > 0) {
+ lb_num = le32toh(s_ad.loc.lb_num);
+ vpart_num = le16toh(s_ad.loc.part_num);
+
+ if (flags == UDF_EXT_ALLOCATED) {
+ /* calculate extent in lb, and offset in lb */
+ num_lb = (len + lb_size -1) / lb_size;
+ slot_offset_lb = (slot_offset + lb_size -1) / lb_size;
+
+ /* adjust our slot */
+ lb_num += slot_offset_lb;
+ num_lb -= slot_offset_lb;
+
+ udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
+ }
+
+ s_ad.len = le32toh(slot_offset | flags);
+ node_ad_cpy[cpy_slot++] = s_ad;
+ slot++;
+
+ DPRINTF(ALLOC, ("\t2: vp %d, lb %d, len %d, flags %d "
+ "-> stack\n",
+ le16toh(s_ad.loc.part_num),
+ le32toh(s_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ UDF_EXT_FLAGS(le32toh(s_ad.len)) >> 30));
+ }
+
+ /* 3) delete remainder */
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &s_ad, &eof);
+ if (eof)
+ break;
+
+ len = le32toh(s_ad.len);
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ if (flags == UDF_EXT_REDIRECT) {
+ slot++;
+ continue;
+ }
+
+ DPRINTF(ALLOC, ("\t3: delete remainder "
+ "vp %d lb %d, len %d, flags %d\n",
+ le16toh(s_ad.loc.part_num),
+ le32toh(s_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ UDF_EXT_FLAGS(le32toh(s_ad.len)) >> 30));
+
+ if (flags == UDF_EXT_ALLOCATED) {
+ lb_num = le32toh(s_ad.loc.lb_num);
+ vpart_num = le16toh(s_ad.loc.part_num);
+ num_lb = (len + lb_size - 1) / lb_size;
+
+ udf_free_allocated_space(ump, lb_num, vpart_num,
+ num_lb);
+ }
+
+ slot++;
+ }
+
+ /* 4) if it will fit into the descriptor then convert */
+ if (new_size < max_l_ad) {
+ /*
+ * rescue/evacuate the old piece by reading it in, and convert it
+ * to internal alloc.
+ */
+ if (new_size == 0) {
+ /* XXX/TODO only for zero sizing now */
+ udf_wipe_adslots(udf_node);
+
+ icbflags &= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+ icbflags |= UDF_ICB_INTERN_ALLOC;
+ icbtag->flags = le16toh(icbflags);
+
+ inflen -= size_diff; KASSERT(inflen == 0);
+ objsize -= size_diff;
+ l_ad = new_size;
+ crclen = dscr_size - UDF_DESC_TAG_LENGTH + l_ea + l_ad;
+ if (fe) {
+ fe->inf_len = le64toh(inflen);
+ fe->l_ad = le32toh(l_ad);
+ fe->tag.desc_crc_len = le16toh(crclen);
+ } else {
+ efe->inf_len = le64toh(inflen);
+ efe->obj_size = le64toh(objsize);
+ efe->l_ad = le32toh(l_ad);
+ efe->tag.desc_crc_len = le16toh(crclen);
+ }
+ /* eventually copy in evacuated piece */
+ /* set new size for uvm */
+ uvm_vnp_setsize(vp, new_size);
+
+ free(node_ad_cpy, M_UDFTEMP);
+ udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
+
+ UDF_UNLOCK_NODE(udf_node, 0);
+
+ KASSERT(new_inflen == orig_inflen - size_diff);
+ KASSERT(new_inflen == 0);
+ KASSERT(new_lbrec == 0);
+
+ return (0);
+ }
+
+ printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
+ }
+
+ /* 5) reset node descriptors */
+ udf_wipe_adslots(udf_node);
+
+ /* 6) copy back extents; merge when possible. Recounting on the fly */
+ cpy_slots = cpy_slot;
+
+ c_ad = node_ad_cpy[0];
+ slot = 0;
+ for (cpy_slot = 1; cpy_slot < cpy_slots; cpy_slot++) {
+ s_ad = node_ad_cpy[cpy_slot];
+
+ DPRINTF(ALLOC, ("\t6: stack -> got mapping vp %d "
+ "lb %d, len %d, flags %d\n",
+ le16toh(s_ad.loc.part_num),
+ le32toh(s_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(s_ad.len)),
+ UDF_EXT_FLAGS(le32toh(s_ad.len)) >> 30));
+
+ /* see if we can merge */
+ if (udf_ads_merge(lb_size, &c_ad, &s_ad)) {
+ /* not mergeable (anymore) */
+ DPRINTF(ALLOC, ("\t6: appending vp %d lb %d, "
+ "len %d, flags %d\n",
+ le16toh(c_ad.loc.part_num),
+ le32toh(c_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(c_ad.len)),
+ UDF_EXT_FLAGS(le32toh(c_ad.len)) >> 30));
+
+ error = udf_append_adslot(udf_node, &slot, &c_ad);
+ if (error)
+ goto errorout; /* panic? */
+ c_ad = s_ad;
+ slot++;
+ }
+ }
+
+ /* 7) push rest slot (if any) */
+ if (UDF_EXT_LEN(c_ad.len) > 0) {
+ DPRINTF(ALLOC, ("\t7: last append vp %d lb %d, "
+ "len %d, flags %d\n",
+ le16toh(c_ad.loc.part_num),
+ le32toh(c_ad.loc.lb_num),
+ UDF_EXT_LEN(le32toh(c_ad.len)),
+ UDF_EXT_FLAGS(le32toh(c_ad.len)) >> 30));
+
+ error = udf_append_adslot(udf_node, &slot, &c_ad);
+ if (error)
+ goto errorout; /* panic? */
+ }
+
+ inflen -= size_diff;
+ objsize -= size_diff;
+ if (fe) {
+ fe->inf_len = le64toh(inflen);
+ } else {
+ efe->inf_len = le64toh(inflen);
+ efe->obj_size = le64toh(objsize);
+ }
+ error = 0;
+
+ /* set new size for uvm */
+ uvm_vnp_setsize(vp, new_size);
+
+errorout:
+ free(node_ad_cpy, M_UDFTEMP);
+
+ udf_count_alloc_exts(udf_node);
+
+ udf_node_sanity_check(udf_node, &new_inflen, &new_lbrec);
+ UDF_UNLOCK_NODE(udf_node, 0);
+
+ KASSERT(new_inflen == orig_inflen - size_diff);
+
+ return (error);
+}
+#endif
Index: sys/fs/udf2/udf_filenames.c
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf_filenames.c
@@ -0,0 +1,201 @@
+/*-
+ * Copyright (c) 2012 Will DeVries
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/iconv.h>
+#include <sys/systm.h> /* printf */
+
+#include "ecma167-udf.h"
+#include "udf.h"
+#include "udf_subr.h"
+#include "udf_mount.h"
+
+extern struct iconv_functions *udf2_iconv;
+
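+/*
+ * Encode a single unicode code point as UTF-8 into *result, advancing the
+ * pointer and decrementing *rrem. Returns the number of bytes written, or 0
+ * when the encoding does not fit or the code point exceeds 21 bits.
+ */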
+static int
+udf_to_utf8(char **result, size_t *rrem, uint32_t ch)
+{
+ int n = 0;
+ char *rp = *result;
+
+ if ((ch & 0xFFFFFF80) == 0) {
+ if (*rrem < 1)
+ return (0);
+
+ n = 1;
+ rp[0] = ch & 0x7F;
+ } else if ((ch & 0xFFFFF800) == 0) {
+ if (*rrem < 2)
+ return (0);
+
+ n = 2;
+ rp[0] = 0xC0 | (ch >> 6);
+ rp[1] = 0x80 | (0x3F & ch);
+ } else if ((ch & 0xFFFF0000) == 0) {
+ if (*rrem < 3)
+ return (0);
+
+ n = 3;
+ rp[0] = 0xE0 | (ch >> 12);
+ rp[1] = 0x80 | (0x3F & (ch >> 6));
+ rp[2] = 0x80 | (0x3F & ch);
+ } else if ((ch & 0xFFE00000) == 0) {
+ if (*rrem < 4)
+ return (0);
+
+ n = 4;
+ rp[0] = 0xF0 | (ch >> 18);
+ rp[1] = 0x80 | (0x3F & (ch >> 12));
+ rp[2] = 0x80 | (0x3F & (ch >> 6));
+ rp[3] = 0x80 | (0x3F & ch);
+ } else {
+ /* do not convert code points above 21 bits. */
+ return (0);
+ }
+
+ *rrem -= n;
+ *result += n;
+ return (n);
+}
+
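+/*
+ * Convert a UDF identifier in 8- or 16-bit OSTA compressed unicode to the
+ * local encoding (kiconv when configured, UTF-8 otherwise). Invalid or
+ * unconvertible characters are replaced by a single '_'; *extloc records the
+ * position of a period close to the end of the name, if any.
+ */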
+static void
+udf_convert_str(struct udf_mount *ump, char *result, size_t result_len, int *extloc,
+ int eightbit, char *id, int id_len)
+{
+ size_t rrem, chrem;
+ int i, endi, needsCRC, invalid;
+ uint32_t uch;
+ char *rp, ch[2];
+ const char *chp;
+ uint16_t *index;
+
+ index = malloc(id_len * sizeof(uint16_t), M_UDFTEMP, M_WAITOK);
+
+ if (eightbit)
+ endi = id_len;
+ else
+ endi = (id_len - 1 > 0) ? id_len - 1 : 0;
+
+ invalid = 0;
+ rp = result;
+ rrem = (size_t)result_len - 1; /* for the null */
+ for (i = 0; i < endi;) {
+ if (eightbit)
+ uch = id[i];
+ else
+ uch = id[i] << 8 | id[i+1];
+
+ index[i] = result_len - rrem;
+
+ if (rrem == 0) {
+ /* no more space, we need to truncate it. */
+ needsCRC = 1;
+ } else if (uch == 0 || uch == 0x2F) {
+ /* do not allow nulls or slashes */
+ invalid++;
+ } else if (ump->flags & UDFMNT_KICONV && udf2_iconv) {
+ /* it might be a valid character */
+ chrem = 2;
+ chp = ch;
+ ch[0] = uch >> 8;
+ ch[1] = uch & 0x00FF;
+ udf2_iconv->convchr(ump->iconv_d2l, &chp, &chrem, &rp, &rrem);
+ if (chrem > 0) {
+ /* not printable or doesn't fit */
+ invalid++;
+ needsCRC = 1;
+ } else
+ invalid = 0;
+ } else {
+ /* utf8 output */
+ /* it is a valid character */
+ if (udf_to_utf8(&rp, &rrem, uch) == 0) {
+ /* doesn't fit or too large */
+ invalid++;
+ needsCRC = 1;
+ } else
+ invalid = 0;
+ }
+
+ if (uch == 0x002E && i != 1) {
+ /* record locations of periods where they occur within
+ 5 chars of the end, but not at the end or start */
+ if (eightbit && id_len - 6 > i && i + 1 != endi) {
+ *extloc = i;
+ } else if (!eightbit && id_len - 11 > i && i + 2 != endi) {
+ *extloc = i;
+ }
+ }
+
+ if (rrem > 0 && invalid == 1) {
+ uch = 0x5F;
+
+ /* if the result doesn't have space this may not fit */
+ if (ump->flags & UDFMNT_KICONV && udf2_iconv) {
+ chrem = 2;
+ chp = ch;
+ ch[0] = uch >> 8;
+ ch[1] = uch & 0x00FF;
+ udf2_iconv->convchr(ump->iconv_d2l, &chp, &chrem, &rp, &rrem);
+ } else {
+ /* utf8 output */
+ udf_to_utf8(&rp, &rrem, uch);
+ }
+ invalid++;
+ }
+
+ if (eightbit)
+ i++;
+ else
+ i += 2;
+ }
+
+ *rp = '\0';
+
+ free(index, M_UDFTEMP);
+}
+
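+/*
+ * Translate an OSTA CS0 identifier (compression ID in id[0]) into a unix
+ * name; identifiers that are empty or use an unknown compression ID yield an
+ * empty result.
+ */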
+void
+udf_to_unix_name(struct udf_mount *ump, char *result, int result_len, char *id, int id_len) {
+ int extloc, eightbit;
+
+ if (id[0] != 8 && id[0] != 16) {
+ /* this is either invalid or an empty string */
+ *result = '\0';
+ return;
+ }
+
+ if (id[0] == 8) {
+ eightbit = 1;
+ } else {
+ eightbit = 0;
+ }
+
+ udf_convert_str(ump, result, result_len, &extloc, eightbit, id+1, id_len-1);
+
+ return;
+}
Index: sys/fs/udf2/udf_iconv.c
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf_iconv.c
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 2003 Ryuichiro Imura
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/iconv.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mount.h>
+
+VFS_DECLARE_ICONV(udf2);
Index: sys/fs/udf2/udf_mount.h
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf_mount.h
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 2006 Reinoud Zandijk
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the
+ * NetBSD Project. See http://www.NetBSD.org/ for
+ * information about NetBSD.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+
+#ifndef _FS_UDF_UDF_MOUNT_H_
+#define _FS_UDF_UDF_MOUNT_H_
+
+/*
+ * Arguments to mount UDF filesystem.
+ */
+
+#define UDFMNT_VERSION 1
+struct udf_args {
+ uint32_t version; /* version of this structure */
+ char *fspec; /* mount specifier */
+ int32_t sessionnr; /* session specifier, rel or abs */
+ uint32_t udfmflags; /* mount options */
+ int32_t gmtoff; /* offset from UTC in seconds */
+
+ uid_t anon_uid; /* mapping of anonymous files uid */
+ gid_t anon_gid; /* mapping of anonymous files gid */
+ uid_t nobody_uid; /* nobody:nobody will map to -1:-1 */
+ gid_t nobody_gid; /* nobody:nobody will map to -1:-1 */
+
+ uint32_t sector_size; /* for mounting dumps/files */
+
+ /* extendable */
+ uint8_t reserved[32];
+};
+
+
+/* udf mount options */
+
+#define UDFMNT_CLOSESESSION 0x00000001 /* close session on dismount */
+#define UDFMNT_BITS "\20\1CLOSESESSION"
+#define UDFMNT_KICONV 100
+
+#endif /* !_FS_UDF_UDF_MOUNT_H_ */
+
Index: sys/fs/udf2/udf_osta.h
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf_osta.h
@@ -0,0 +1,15 @@
+/*
+ * Prototypes for the OSTA functions
+ */
+
+
+#ifndef _FS_UDF_OSTA_H_
+#define _FS_UDF_OSTA_H_
+
+#include <sys/types.h>
+
+unsigned short udf_cksum(unsigned char *, int);
+unsigned short udf_unicode_cksum(unsigned short *, int);
+uint16_t udf_ea_cksum(uint8_t *data);
+
+#endif /* _FS_UDF_OSTA_H_ */
Index: sys/fs/udf2/udf_osta.c
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf_osta.c
@@ -0,0 +1,108 @@
+/*
+ * Various routines from the OSTA 2.01 specs. Copyrights are included with
+ * each code segment. Slight whitespace modifications have been made for
+ * formatting purposes. Typos/bugs have been fixed.
+ *
+ * Copyright to this code held by AT&T.
+ */
+
+#include "udf_osta.h"
+
+/*
+ * CRC 010041
+ */
+static unsigned short crc_table[256] = {
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
+ 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
+ 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
+ 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
+ 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
+ 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
+ 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
+ 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
+ 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
+ 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
+ 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
+ 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
+ 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
+ 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
+ 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
+ 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
+ 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
+ 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
+ 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
+ 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
+ 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
+ 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
+ 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
+};
+
+unsigned short
+udf_cksum(unsigned char *s, int n)
+{
+ unsigned short crc=0;
+
+ while (n-- > 0)
+ crc = crc_table[(crc>>8 ^ *s++) & 0xff] ^ (crc<<8);
+ return (crc);
+}
+
+/* UNICODE Checksum */
+unsigned short
+udf_unicode_cksum(unsigned short *s, int n)
+{
+ unsigned short crc=0;
+
+ while (n-- > 0) {
+ /* Take high order byte first--corresponds to a big endian
+ * byte stream.
+ */
+ crc = crc_table[(crc>>8 ^ (*s>>8)) & 0xff] ^ (crc<<8);
+ crc = crc_table[(crc>>8 ^ (*s++ & 0xff)) & 0xff] ^ (crc<<8);
+ }
+ return (crc);
+}
+
+
+/*
+ * Calculates a 16-bit checksum of the Implementation Use
+ * Extended Attribute header or Application Use Extended Attribute
+ * header. The fields AttributeType through ImplementationIdentifier
+ * (or ApplicationIdentifier) inclusively represent the
+ * data covered by the checksum (48 bytes).
+ *
+ */
+uint16_t
+udf_ea_cksum(uint8_t *data)
+{
+ uint16_t checksum = 0;
+ int count;
+
+ for (count = 0; count < 48; count++) {
+ checksum += *data++;
+ }
+
+ return (checksum);
+}
+
+
+#ifdef MAIN
+unsigned char bytes[] = { 0x70, 0x6A, 0x77 };
+
+main(void)
+{
+ unsigned short x;
+ x = udf_cksum(bytes, sizeof bytes);
+ printf("checksum: calculated=%4.4x, correct=%4.4x\en", x, 0x3299);
+ exit(0);
+}
+#endif
+
+
Index: sys/fs/udf2/udf_readwrite.c
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf_readwrite.c
@@ -0,0 +1,782 @@
+/*-
+ * Copyright (c) 2012 Oleksandr Dudinskyi
+ * Copyright (c) 2012 Will DeVries
+ * Copyright (c) 2007, 2008 Reinoud Zandijk
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+#include <sys/endian.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/vnode.h>
+#include <sys/buf.h>
+#include <sys/malloc.h>
+
+#include "ecma167-udf.h"
+#include "udf.h"
+#include "udf_subr.h"
+#include "udf_mount.h"
+
+static int udf_read_phys_sectors(struct udf_mount *ump, int what, void *blob,
+ uint32_t start, uint32_t sectors);
+/* --------------------------------------------------------------------- */
+
+#if 0
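+/*
+ * Resynchronize with the FID stream inside an internally recorded directory
+ * block and rewrite the tag location and checksum of every FID to the new
+ * logical block number.
+ */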
+void
+udf_fixup_fid_block(uint8_t *blob, int lb_size,
+ int rfix_pos, int max_rfix_pos, uint32_t lb_num)
+{
+ struct fileid_desc *fid;
+ uint8_t *fid_pos;
+ int fid_len, found;
+
+ /* needs to be word aligned */
+ KASSERT(rfix_pos % 4 == 0);
+
+ /* first resync with the FID stream !!! */
+ found = 0;
+ while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
+ fid_pos = blob + rfix_pos;
+ fid = (struct fileid_desc *) fid_pos;
+ if (udf_rw16(fid->tag.id) == TAGID_FID) {
+ if (udf_check_tag((union dscrptr *) fid) == 0)
+ found = 1;
+ }
+ if (found)
+ break;
+ /* try next location; can only be 4 bytes aligned */
+ rfix_pos += 4;
+ }
+
+ /* walk over the fids */
+ fid_pos = blob + rfix_pos;
+ while (rfix_pos + sizeof(struct desc_tag) <= max_rfix_pos) {
+ fid = (struct fileid_desc *) fid_pos;
+ if (udf_rw16(fid->tag.id) != TAGID_FID) {
+ /* end of FID stream; end of directory or corrupted */
+ break;
+ }
+
+ /* update sector number and recalculate checksum */
+ fid->tag.tag_loc = udf_rw32(lb_num);
+ udf_validate_tag_sum((union dscrptr *) fid);
+
+ /* if the FID crosses the end of the buffer, we're done */
+ if (rfix_pos + UDF_FID_SIZE >= max_rfix_pos)
+ break;
+
+ fid_len = udf_fidsize(fid);
+ fid_pos += fid_len;
+ rfix_pos += fid_len;
+ }
+}
+
+
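+/*
+ * Update the tag location and checksums of the extended attribute header
+ * embedded in a file entry or extended file entry before it is written out.
+ */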
+void
+udf_fixup_internal_extattr(uint8_t *blob, uint32_t lb_num)
+{
+ struct desc_tag *tag;
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct extattrhdr_desc *eahdr;
+ int l_ea;
+
+ /* get information from fe/efe */
+ tag = (struct desc_tag *) blob;
+ switch (udf_rw16(tag->id)) {
+ case TAGID_FENTRY :
+ fe = (struct file_entry *) blob;
+ l_ea = udf_rw32(fe->l_ea);
+ eahdr = (struct extattrhdr_desc *) fe->data;
+ break;
+ case TAGID_EXTFENTRY :
+ efe = (struct extfile_entry *) blob;
+ l_ea = udf_rw32(efe->l_ea);
+ eahdr = (struct extattrhdr_desc *) efe->data;
+ break;
+ case TAGID_INDIRECTENTRY :
+ case TAGID_ALLOCEXTENT :
+ case TAGID_EXTATTR_HDR :
+ return;
+ default:
+ panic("%s: passed bad tag\n", __func__);
+ }
+
+ /* something recorded here? (why am i called?) */
+ if (l_ea == 0)
+ return;
+
+#if 0
+ /* check extended attribute tag */
+ /* TODO XXX what to do when we encounter an error here? */
+ error = udf_check_tag(eahdr);
+ if (error)
+ return; /* for now */
+ if (udf_rw16(eahdr->tag.id) != TAGID_EXTATTR_HDR)
+ return; /* for now */
+ error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
+ if (error)
+ return; /* for now */
+#endif
+
+ DPRINTF(EXTATTR, ("node fixup: found %d bytes of extended attributes\n",
+ l_ea));
+
+ /* fixup eahdr tag */
+ eahdr->tag.tag_loc = udf_rw32(lb_num);
+ udf_validate_tag_and_crc_sums((union dscrptr *) eahdr);
+}
+
+
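+/*
+ * Fix up the location dependent parts of a node descriptor before writeout:
+ * embedded extended attributes, internally recorded FIDs and an embedded
+ * space bitmap all carry the logical block number they are written to.
+ */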
+void
+udf_fixup_node_internals(struct udf_mount *ump, uint8_t *blob, int udf_c_type)
+{
+ struct desc_tag *tag, *sbm_tag;
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct alloc_ext_entry *ext;
+ uint32_t lb_size, lb_num;
+ uint32_t intern_pos, max_intern_pos;
+ int icbflags, addr_type, file_type, intern, has_fids, has_sbm, l_ea;
+
+ lb_size = udf_rw32(ump->logical_vol->lb_size);
+ /* if it's not a node we're done */
+ if (udf_c_type != UDF_C_NODE)
+ return;
+
+ /* NOTE this could also be done in write_internal */
+ /* start of a descriptor */
+ l_ea = 0;
+ has_fids = 0;
+ has_sbm = 0;
+ intern = 0;
+ file_type = 0;
+ max_intern_pos = intern_pos = lb_num = 0; /* shut up gcc! */
+
+ tag = (struct desc_tag *) blob;
+ switch (udf_rw16(tag->id)) {
+ case TAGID_FENTRY :
+ fe = (struct file_entry *) tag;
+ l_ea = udf_rw32(fe->l_ea);
+ icbflags = udf_rw16(fe->icbtag.flags);
+ addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
+ file_type = fe->icbtag.file_type;
+ intern = (addr_type == UDF_ICB_INTERN_ALLOC);
+ intern_pos = UDF_FENTRY_SIZE + l_ea;
+ max_intern_pos = intern_pos + udf_rw64(fe->inf_len);
+ lb_num = udf_rw32(fe->tag.tag_loc);
+ break;
+ case TAGID_EXTFENTRY :
+ efe = (struct extfile_entry *) tag;
+ l_ea = udf_rw32(efe->l_ea);
+ icbflags = udf_rw16(efe->icbtag.flags);
+ addr_type = (icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK);
+ file_type = efe->icbtag.file_type;
+ intern = (addr_type == UDF_ICB_INTERN_ALLOC);
+ intern_pos = UDF_EXTFENTRY_SIZE + l_ea;
+ max_intern_pos = intern_pos + udf_rw64(efe->inf_len);
+ lb_num = udf_rw32(efe->tag.tag_loc);
+ break;
+ case TAGID_INDIRECTENTRY :
+ case TAGID_EXTATTR_HDR :
+ break;
+ case TAGID_ALLOCEXTENT :
+ /* force crclen to 8 for UDF version < 2.01 */
+ ext = (struct alloc_ext_entry *) tag;
+ if (udf_rw16(ump->logvol_info->min_udf_readver) <= 0x200)
+ ext->tag.desc_crc_len = udf_rw16(8);
+ break;
+ default:
+ panic("%s: passed bad tag\n", __func__);
+ break;
+ }
+
+ /* determine what to fix if it's internally recorded */
+ if (intern) {
+ has_fids = (file_type == UDF_ICB_FILETYPE_DIRECTORY) ||
+ (file_type == UDF_ICB_FILETYPE_STREAMDIR);
+ has_sbm = (file_type == UDF_ICB_FILETYPE_META_BITMAP);
+ }
+
+ /* fixup internal extended attributes if present */
+ if (l_ea)
+ udf_fixup_internal_extattr(blob, lb_num);
+
+ /* fixup fids lb numbers */
+ if (has_fids)
+ udf_fixup_fid_block(blob, lb_size, intern_pos,
+ max_intern_pos, lb_num);
+
+ /* fixup space bitmap descriptor */
+ if (has_sbm) {
+ sbm_tag = (struct desc_tag *) (blob + intern_pos);
+ sbm_tag->tag_loc = tag->tag_loc;
+ udf_validate_tag_and_crc_sums((uint8_t *) sbm_tag);
+ }
+
+ udf_validate_tag_and_crc_sums(blob);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Set of generic descriptor readers and writers and their helper functions.
+ * Descriptors inside `logical space', i.e. inside logically mapped
+ * partitions, can never be longer than one logical sector.
+ *
+ * NOTE that these functions *can* be used by the scheduler backends to read
+ * node descriptors too.
+ *
+ * For reading, the size of the allocated piece is returned in multiples of
+ * the sector size due to udf_calc_udf_malloc_size().
+ */
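+
+/*
+ * Read `length' bytes from byte offset `start' of a node. Internally
+ * allocated nodes are copied straight from the descriptor; otherwise file
+ * blocks are translated with udf_bmap_translate() and read from the device,
+ * with unmapped (zero) extents returned as zero-filled data.
+ */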
+int
+udf_read_node(struct udf_node *unode, uint8_t *blob, off_t start, int length)
+{
+ struct vnode *devvp = unode->ump->devvp;
+ struct buf *bp; /* *buf, *nestbuf, ; */
+ uint64_t file_size, lsect;
+ int icbflags, addr_type, error = 0;
+ uint32_t sector_size, blkinsect, fileblk, fileblkoff, numlsect, numb;
+ uint8_t *pos;
+
+ sector_size = unode->ump->discinfo.sector_size;
+ blkinsect = sector_size / DEV_BSIZE;
+
+ if (unode->fe) {
+ pos = &unode->fe->data[0] + le32toh(unode->fe->l_ea);
+ icbflags = le16toh(unode->fe->icbtag.flags);
+ file_size = le64toh(unode->fe->inf_len);
+ } else {
+ pos = &unode->efe->data[0] + le32toh(unode->efe->l_ea);
+ icbflags = le16toh(unode->efe->icbtag.flags);
+ file_size = le64toh(unode->efe->inf_len);
+ }
+
+ length = min(file_size - start, length);
+ fileblk = start / sector_size;
+ fileblkoff = start % sector_size;
+
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+ if (addr_type == UDF_ICB_INTERN_ALLOC) {
+ numb = min(length, file_size - fileblkoff);
+ memcpy(blob, pos + fileblkoff, numb);
+ return (error);
+ }
+
+ while (length) {
+ error = udf_bmap_translate(unode, fileblk, &lsect, &numlsect);
+ if (error)
+ return (error);
+
+ if (lsect == UDF_TRANS_ZERO) {
+ numb = min(length, sector_size * numlsect - fileblkoff);
+ memset(blob, 0, numb);
+ length -= numb;
+ blob += numb;
+ fileblkoff = 0;
+ } else if (lsect == UDF_TRANS_INTERN) {
+ return (EDOOFUS);
+ } else {
+ while (numlsect > 0) {
+ if ((error = bread(devvp, lsect*blkinsect, sector_size, NOCRED,
+ &bp)) != 0) {
+					if (bp != NULL)
+						brelse(bp);
+ return (error);
+ }
+
+ numb = min(length, sector_size - fileblkoff);
+ bcopy(bp->b_data + fileblkoff, blob, numb);
+ brelse(bp);
+ bp = NULL;
+
+ blob += numb;
+ length -= numb;
+ lsect++;
+ numlsect--;
+ fileblkoff = 0;
+ }
+ }
+
+ fileblk += numlsect;
+ }
+ return (0);
+}
+
+/* SYNC reading of n blocks from specified sector */
+static int
+udf_read_phys_sectors(struct udf_mount *ump, int what, void *blob,
+ uint32_t start, uint32_t sectors)
+{
+ struct buf *bp; /* *buf, *nestbuf, ; */
+ int error = 0; /* piece; */
+ /* off_t lblkno, rblkno; */
+ uint32_t sector_size, blks; /* buf_offset; */
+ struct vnode *devvp = ump->devvp;
+
+ sector_size = ump->discinfo.sector_size;
+ blks = btodb(sector_size);
+
+ while (sectors > 0 && error == 0) {
+ if ((error = bread(devvp, start*blks, sector_size, NOCRED,
+ &bp)) != 0) {
+			if (bp != NULL)
+				brelse(bp);
+ return (error);
+ }
+
+ bcopy(bp->b_data, blob, sector_size);
+ brelse(bp);
+ bp = NULL;
+
+ blob = (void *) ((uint8_t *)blob + sector_size);
+ start++;
+ sectors--;
+ }
+
+ return (0);
+#if 0
+ DPRINTF(READ, ("udf_intbreadn() : sectors = %d, sector_size = %d\n",
+ sectors, sector_size));
+ buf = getiobuf(ump->devvp, true);
+ buf->b_flags = B_READ;
+ buf->b_cflags = BC_BUSY; /* needed? */
+ buf->b_iodone = NULL;
+ buf->b_data = blob;
+ buf->b_bcount = sectors * sector_size;
+ buf->b_resid = buf->b_bcount;
+ buf->b_bufsize = buf->b_bcount;
+ buf->b_private = NULL; /* not needed yet */
+ BIO_SETPRIO(buf, BPRIO_DEFAULT);
+ buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = start * blks;
+ buf->b_proc = NULL;
+
+ error = 0;
+ buf_offset = 0;
+ rblkno = start;
+ lblkno = 0;
+ while ((sectors > 0) && (error == 0)) {
+ piece = MIN(MAXPHYS/sector_size, sectors);
+ DPRINTF(READ, ("read in %d + %d\n", (uint32_t) rblkno, piece));
+
+ nestbuf = getiobuf(NULL, true);
+ nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
+ /* nestbuf is B_ASYNC */
+
+ /* identify this nestbuf */
+ nestbuf->b_lblkno = lblkno;
+
+		/* CD schedules on raw blkno */
+ nestbuf->b_blkno = rblkno * blks;
+ nestbuf->b_proc = NULL;
+ nestbuf->b_rawblkno = rblkno * blks;
+ nestbuf->b_udf_c_type = what;
+
+ udf_discstrat_queuebuf(ump, nestbuf);
+
+ lblkno += piece;
+ rblkno += piece;
+ buf_offset += piece * sector_size;
+ sectors -= piece;
+ }
+ error = biowait(buf);
+ putiobuf(buf);
+
+ return error;
+#endif
+}
+
+
+/* synchronous generic descriptor read */
+int
+udf_read_phys_dscr(struct udf_mount *ump, uint32_t sector,
+ struct malloc_type *mtype, union dscrptr **dstp)
+{
+ union dscrptr *dst, *new_dst;
+ int sectors, dscrlen, i, error, sector_size;
+ uint8_t *pos;
+
+ sector_size = ump->discinfo.sector_size;
+
+ *dstp = dst = NULL;
+ dscrlen = sector_size;
+
+ /* read initial piece */
+ dst = malloc(sector_size, mtype, M_WAITOK);
+ error = udf_read_phys_sectors(ump, UDF_C_DSCR, dst, sector, 1);
+
+ if (!error) {
+		/* check if it's a valid tag */
+ error = udf_check_tag(dst);
+ if (error) {
+			/* check if it's an empty block */
+ pos = (uint8_t *) dst;
+ for (i = 0; i < sector_size; i++, pos++) {
+ if (*pos) break;
+ }
+ if (i == sector_size) {
+ /* return no error but with no dscrptr */
+ /* dispose first block */
+ free(dst, mtype);
+ return (0);
+ }
+ }
+ /* calculate descriptor size */
+ dscrlen = udf_tagsize(dst, sector_size);
+ }
+
+ if (!error && (dscrlen > sector_size)) {
+ /*
+		 * Read the rest of the descriptor. Since this is only done at
+		 * mount time, it is overkill to define and use a specific
+		 * udf_intbreadn for this alone.
+ */
+
+ new_dst = realloc(dst, dscrlen, mtype, M_WAITOK);
+ if (new_dst == NULL) {
+ free(dst, mtype);
+ return (ENOMEM);
+ }
+ dst = new_dst;
+
+ sectors = (dscrlen + sector_size -1) / sector_size;
+
+ pos = (uint8_t *) dst + sector_size;
+ error = udf_read_phys_sectors(ump, UDF_C_DSCR, pos,
+ sector + 1, sectors-1);
+ }
+ if (!error)
+ error = udf_check_tag_payload(dst, dscrlen);
+ if (error && dst) {
+ free(dst, mtype);
+ dst = NULL;
+ }
+ *dstp = dst;
+
+ return (error);
+}
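+
+/*
+ * Illustrative sketch (not part of this change): how a caller is expected to
+ * use the generic descriptor reader above. The reader allocates the
+ * descriptor in multiples of the sector size with the caller's malloc type;
+ * on success the caller owns the result and must free it with that same
+ * type. The function name udf_example_read_pvd is hypothetical.
+ */
+#if 0
+static int
+udf_example_read_pvd(struct udf_mount *ump, uint32_t sector)
+{
+	union dscrptr *dscr;
+	int error;
+
+	error = udf_read_phys_dscr(ump, sector, M_UDFTEMP, &dscr);
+	if (error)
+		return (error);
+	if (dscr == NULL)		/* blank sector, no descriptor here */
+		return (ENOENT);
+	if (le16toh(dscr->tag.id) != TAGID_PRI_VOL) {
+		free(dscr, M_UDFTEMP);
+		return (ENOENT);
+	}
+	/* ... inspect dscr->pvd here ... */
+	free(dscr, M_UDFTEMP);
+	return (0);
+}
+#endif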
+
+
+#if 0
+static void
+udf_write_phys_buf(struct udf_mount *ump, int what, struct buf *buf)
+{
+ struct buf *nestbuf;
+ uint32_t buf_offset;
+ off_t lblkno, rblkno;
+ int sector_size = ump->discinfo.sector_size;
+ int blks = sector_size / DEV_BSIZE;
+ uint32_t sectors;
+ int piece;
+ int error;
+
+ sectors = buf->b_bcount / sector_size;
+ DPRINTF(WRITE, ("udf_intbwriten() : sectors = %d, sector_size = %d\n",
+ sectors, sector_size));
+
+ /* don't forget to increase pending count for the bwrite itself */
+/* panic("NO WRITING\n"); */
+ if (buf->b_vp) {
+ mutex_enter(&buf->b_vp->v_interlock);
+ buf->b_vp->v_numoutput++;
+ mutex_exit(&buf->b_vp->v_interlock);
+ }
+
+ error = 0;
+ buf_offset = 0;
+ rblkno = buf->b_blkno / blks;
+ lblkno = 0;
+ while ((sectors > 0) && (error == 0)) {
+ piece = MIN(MAXPHYS/sector_size, sectors);
+ DPRINTF(WRITE, ("write out %d + %d\n",
+ (uint32_t) rblkno, piece));
+
+ nestbuf = getiobuf(NULL, true);
+ nestiobuf_setup(buf, nestbuf, buf_offset, piece * sector_size);
+ /* nestbuf is B_ASYNC */
+
+ /* identify this nestbuf */
+ nestbuf->b_lblkno = lblkno;
+
+		/* CD schedules on raw blkno */
+ nestbuf->b_blkno = rblkno * blks;
+ nestbuf->b_proc = NULL;
+ nestbuf->b_rawblkno = rblkno * blks;
+ nestbuf->b_udf_c_type = what;
+
+ udf_discstrat_queuebuf(ump, nestbuf);
+
+ lblkno += piece;
+ rblkno += piece;
+ buf_offset += piece * sector_size;
+ sectors -= piece;
+ }
+}
+
+
+/* SYNC writing of n blocks from specified sector */
+int
+udf_write_phys_sectors(struct udf_mount *ump, int what, void *blob,
+ uint32_t start, uint32_t sectors)
+{
+ struct vnode *vp;
+ struct buf *buf;
+ int sector_size = ump->discinfo.sector_size;
+ int blks = sector_size / DEV_BSIZE;
+ int error;
+
+ /* get transfer buffer */
+ vp = ump->devvp;
+ buf = getiobuf(vp, true);
+ buf->b_flags = B_WRITE;
+ buf->b_cflags = BC_BUSY; /* needed? */
+ buf->b_iodone = NULL;
+ buf->b_data = blob;
+ buf->b_bcount = sectors * sector_size;
+ buf->b_resid = buf->b_bcount;
+ buf->b_bufsize = buf->b_bcount;
+ buf->b_private = NULL; /* not needed yet */
+ BIO_SETPRIO(buf, BPRIO_DEFAULT);
+ buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = start * blks;
+ buf->b_proc = NULL;
+
+ /* do the write, wait and return error */
+ udf_write_phys_buf(ump, what, buf);
+ error = biowait(buf);
+ putiobuf(buf);
+
+ return (error);
+}
+
+
+/* synchronous generic descriptor write */
+int
+udf_write_phys_dscr_sync(struct udf_mount *ump, struct udf_node *udf_node, int what,
+ union dscrptr *dscr, uint32_t sector, uint32_t logsector)
+{
+ struct vnode *vp;
+ struct buf *buf;
+ int sector_size = ump->discinfo.sector_size;
+ int blks = sector_size / DEV_BSIZE;
+ int dscrlen;
+ int error;
+
+ /* set sector number in the descriptor and validate */
+ dscr->tag.tag_loc = udf_rw32(logsector);
+ udf_validate_tag_and_crc_sums(dscr);
+
+ /* calculate descriptor size */
+ dscrlen = udf_tagsize(dscr, sector_size);
+
+ /* get transfer buffer */
+ vp = udf_node ? udf_node->vnode : ump->devvp;
+ buf = getiobuf(vp, true);
+ buf->b_flags = B_WRITE;
+ buf->b_cflags = BC_BUSY; /* needed? */
+ buf->b_iodone = NULL;
+ buf->b_data = (void *) dscr;
+ buf->b_bcount = dscrlen;
+ buf->b_resid = buf->b_bcount;
+ buf->b_bufsize = buf->b_bcount;
+ buf->b_private = NULL; /* not needed yet */
+ BIO_SETPRIO(buf, BPRIO_DEFAULT);
+ buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = sector * blks;
+ buf->b_proc = NULL;
+
+ /* do the write, wait and return error */
+ udf_write_phys_buf(ump, what, buf);
+ error = biowait(buf);
+ putiobuf(buf);
+
+ return (error);
+}
+
+
+/* asynchronous generic descriptor write */
+int
+udf_write_phys_dscr_async(struct udf_mount *ump, struct udf_node *udf_node,
+ int what, union dscrptr *dscr,
+ uint32_t sector, uint32_t logsector,
+ void (*dscrwr_callback)(struct buf *))
+{
+ struct vnode *vp;
+ struct buf *buf;
+ int dscrlen;
+ int sector_size = ump->discinfo.sector_size;
+ int blks = sector_size / DEV_BSIZE;
+
+ KASSERT(dscrwr_callback);
+ DPRINTF(NODE, ("udf_write_phys_dscr_async() called\n"));
+
+ /* set sector number in the descriptor and validate */
+ dscr->tag.tag_loc = udf_rw32(logsector);
+ udf_validate_tag_and_crc_sums(dscr);
+
+ /* calculate descriptor size */
+ dscrlen = udf_tagsize(dscr, sector_size);
+
+ /* get transfer buffer */
+ vp = udf_node ? udf_node->vnode : ump->devvp;
+ buf = getiobuf(vp, true);
+ buf->b_flags = B_WRITE | B_ASYNC;
+ buf->b_cflags = BC_BUSY;
+ buf->b_iodone = dscrwr_callback;
+ buf->b_data = dscr;
+ buf->b_bcount = dscrlen;
+ buf->b_resid = buf->b_bcount;
+ buf->b_bufsize = buf->b_bcount;
+ buf->b_private = NULL; /* not needed yet */
+ BIO_SETPRIO(buf, BPRIO_DEFAULT);
+ buf->b_lblkno = buf->b_blkno = buf->b_rawblkno = sector * blks;
+ buf->b_proc = NULL;
+
+ /* do the write and return no error */
+ udf_write_phys_buf(ump, what, buf);
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+/* disc strategy dispatchers */
+
+int
+udf_create_logvol_dscr(struct udf_mount *ump, struct udf_node *udf_node, struct long_ad *icb,
+ union dscrptr **dscrptr)
+{
+ struct udf_strategy *strategy = ump->strategy;
+ struct udf_strat_args args;
+ int error;
+
+ KASSERT(strategy);
+ args.ump = ump;
+ args.udf_node = udf_node;
+ args.icb = icb;
+ args.dscr = NULL;
+
+ error = (strategy->create_logvol_dscr)(&args);
+ *dscrptr = args.dscr;
+
+ return (error);
+}
+
+
+void
+udf_free_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
+ void *dscr)
+{
+ struct udf_strategy *strategy = ump->strategy;
+ struct udf_strat_args args;
+
+ KASSERT(strategy,("Strategy not specified."));
+ args.ump = ump;
+ args.icb = icb;
+ args.dscr = dscr;
+
+ (strategy->free_logvol_dscr)(&args);
+}
+
+
+int
+udf_read_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
+ union dscrptr **dscrptr)
+{
+ struct udf_strategy *strategy = ump->strategy;
+ struct udf_strat_args args;
+ int error;
+
+ KASSERT(strategy);
+ args.ump = ump;
+ args.icb = icb;
+ args.dscr = NULL;
+
+ error = (strategy->read_logvol_dscr)(&args);
+ *dscrptr = args.dscr;
+
+ return (error);
+}
+
+
+int
+udf_write_logvol_dscr(struct udf_node *udf_node, union dscrptr *dscr,
+ struct long_ad *icb, int waitfor)
+{
+ struct udf_strategy *strategy = udf_node->ump->strategy;
+ struct udf_strat_args args;
+ int error;
+
+ KASSERT(strategy);
+ args.ump = udf_node->ump;
+ args.udf_node = udf_node;
+ args.icb = icb;
+ args.dscr = dscr;
+ args.waitfor = waitfor;
+
+ error = (strategy->write_logvol_dscr)(&args);
+ return (error);
+}
+
+
+void
+udf_discstrat_queuebuf(struct udf_mount *ump, struct buf *nestbuf)
+{
+ struct udf_strategy *strategy = ump->strategy;
+ struct udf_strat_args args;
+
+ KASSERT(strategy);
+ args.ump = ump;
+ args.nestbuf = nestbuf;
+
+ (strategy->queuebuf)(&args);
+}
+
+
+void
+udf_discstrat_init(struct udf_mount *ump)
+{
+ struct udf_strategy *strategy = ump->strategy;
+ struct udf_strat_args args;
+
+ KASSERT(strategy);
+ args.ump = ump;
+ (strategy->discstrat_init)(&args);
+}
+
+
+void
+udf_discstrat_finish(struct udf_mount *ump)
+{
+ struct udf_strategy *strategy = ump->strategy;
+ struct udf_strat_args args;
+
+ /* strategy might not have been set, so ignore if not set */
+ if (strategy) {
+ args.ump = ump;
+ (strategy->discstrat_finish)(&args);
+ }
+}
+#endif
+/* --------------------------------------------------------------------- */
+
Index: sys/fs/udf2/udf_subr.h
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf_subr.h
@@ -0,0 +1,210 @@
+/*-
+ * Copyright (c) 2006, 2008 Reinoud Zandijk
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _FS_UDF_UDF_SUBR_H_
+#define _FS_UDF_UDF_SUBR_H_
+
+/* handies */
+#define VFSTOUDF(mp) ((struct udf_mount *)mp->mnt_data)
+#define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
+
+
+/* device information updating */
+int udf_update_trackinfo(struct udf_mount *ump, struct mmc_trackinfo *trackinfo);
+int udf_update_discinfo(struct udf_mount *ump, uint32_t sector_size, uint32_t psize);
+int udf_search_tracks(struct udf_mount *ump, struct udf_args *args,
+ int *first_tracknr, int *last_tracknr);
+//int udf_search_writing_tracks(struct udf_mount *ump);
+//int udf_setup_writeparams(struct udf_mount *ump);
+int udf_synchronise_caches(struct udf_mount *ump);
+
+/* tags operations */
+int udf_fidsize(struct fileid_desc *fid);
+int udf_check_tag(void *blob);
+int udf_check_tag_payload(void *blob, uint32_t max_length);
+void udf_validate_tag_sum(void *blob);
+void udf_validate_tag_and_crc_sums(void *blob);
+int udf_tagsize(union dscrptr *dscr, uint32_t udf_sector_size);
+
+/* read/write descriptors */
+//int udf_write_phys_sectors(struct udf_mount *ump, int what, void *blob,
+// uint32_t start, uint32_t sectors);
+int udf_read_phys_dscr(
+ struct udf_mount *ump,
+ uint32_t sector,
+ struct malloc_type *mtype, /* where to allocate */
+ union dscrptr **dstp); /* out */
+
+//int udf_write_phys_dscr_sync(struct udf_mount *ump, struct udf_node *udf_node,
+// int what, union dscrptr *dscr,
+// uint32_t sector, uint32_t logsector);
+//int udf_write_phys_dscr_async(struct udf_mount *ump, struct udf_node *udf_node,
+// int what, union dscrptr *dscr,
+// uint32_t sector, uint32_t logsector,
+// void (*dscrwr_callback)(struct buf *));
+
+/* read/write node descriptors */
+//int udf_create_logvol_dscr(struct udf_mount *ump, struct udf_node *udf_node,
+// struct long_ad *icb, union dscrptr **dscrptr);
+//void udf_free_logvol_dscr(struct udf_mount *ump, struct long_ad *icb_loc,
+// void *dscr);
+//int udf_read_logvol_dscr(struct udf_mount *ump, struct long_ad *icb,
+// union dscrptr **dscrptr);
+//int udf_write_logvol_dscr(struct udf_node *udf_node, union dscrptr *dscr,
+// struct long_ad *icb, int waitfor);
+
+
+/* volume descriptors readers and checkers */
+int udf_read_anchors(struct udf_mount *ump);
+int udf_read_vds_space(struct udf_mount *ump);
+int udf_process_vds(struct udf_mount *ump);
+int udf_read_vds_tables(struct udf_mount *ump);
+int udf_read_rootdirs(struct udf_mount *ump);
+
+/* open/close and sync volumes */
+int udf_open_logvol(struct udf_mount *ump);
+int udf_close_logvol(struct udf_mount *ump, int mntflags);
+//int udf_writeout_vat(struct udf_mount *ump);
+//int udf_write_physical_partition_spacetables(struct udf_mount *ump, int waitfor);
+//int udf_write_metadata_partition_spacetable(struct udf_mount *ump, int waitfor);
+//void udf_do_sync(struct udf_mount *ump, kauth_cred_t cred, int waitfor);
+//void udf_synchronise_metadatamirror_node(struct udf_mount *ump);
+
+/* translation services */
+int udf_translate_vtop(struct udf_mount *ump, struct long_ad *icb_loc,
+ uint32_t *lb_numres, uint32_t *extres);
+//void udf_translate_vtop_list(struct udf_mount *ump, uint32_t sectors,
+// uint16_t vpart_num, uint64_t *lmapping, uint64_t *pmapping);
+int udf_bmap_translate(struct udf_node *udf_node, uint32_t block,
+ uint64_t *lsector, uint32_t *maxblks);
+int udf_translate_file_extent(struct udf_node *node,
+ uint32_t from, uint32_t num_lb, uint64_t *map);
+void udf_get_adslot(struct udf_node *udf_node, int slot, struct long_ad *icb, int *eof);
+int udf_append_adslot(struct udf_node *udf_node, int *slot, struct long_ad *icb);
+
+int udf_vat_read(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset);
+//int udf_vat_write(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset);
+
+/* disc allocation */
+int udf_get_c_type(struct udf_node *udf_node);
+int udf_get_record_vpart(struct udf_mount *ump, int udf_c_type);
+//void udf_do_reserve_space(struct udf_mount *ump, struct udf_node *udf_node, uint16_t vpart_num, uint32_t num_lb);
+//void udf_do_unreserve_space(struct udf_mount *ump, struct udf_node *udf_node, uint16_t vpart_num, uint32_t num_lb);
+//int udf_reserve_space(struct udf_mount *ump, struct udf_node *udf_node, int udf_c_type, uint16_t vpart_num, uint32_t num_lb, int can_fail);
+//void udf_cleanup_reservation(struct udf_node *udf_node);
+//int udf_allocate_space(struct udf_mount *ump, struct udf_node *udf_node, int udf_c_type, uint16_t vpart_num, uint32_t num_lb, uint64_t *lmapping);
+//void udf_free_allocated_space(struct udf_mount *ump, uint32_t lb_num, uint16_t vpart_num, uint32_t num_lb);
+//void udf_late_allocate_buf(struct udf_mount *ump, struct buf *buf, uint64_t *lmapping, struct long_ad *node_ad_cpy, uint16_t *vpart_num);
+//int udf_grow_node(struct udf_node *node, uint64_t new_size);
+//int udf_shrink_node(struct udf_node *node, uint64_t new_size);
+void udf_calc_freespace(struct udf_mount *ump, uint64_t *sizeblks, uint64_t *freeblks);
+
+/* node readers and writers */
+//uint64_t udf_advance_uniqueid(struct udf_mount *ump);
+
+#define UDF_LOCK_NODE(udf_node, flag) udf_lock_node(udf_node, (flag), __FILE__, __LINE__)
+#define UDF_UNLOCK_NODE(udf_node, flag) udf_unlock_node(udf_node, (flag))
+void udf_lock_node(struct udf_node *udf_node, int flag, char const *fname, const int lineno);
+void udf_unlock_node(struct udf_node *udf_node, int flag);
+
+int udf_get_node(struct udf_mount *ump, ino_t ino, struct udf_node **ppunode);
+//int udf_writeout_node(struct udf_node *udf_node, int waitfor);
+int udf_dispose_node(struct udf_node *node);
+
+/* node ops */
+//int udf_resize_node(struct udf_node *node, uint64_t new_size, int *extended);
+int udf_extattr_search_intern(struct udf_node *node, uint32_t sattr, char const *sattrname, uint32_t *offsetp, uint32_t *lengthp);
+
+/* node data buffer read/write */
+void udf_read_filebuf(struct udf_node *node, struct buf *buf);
+//void udf_write_filebuf(struct udf_node *node, struct buf *buf);
+//void udf_fixup_fid_block(uint8_t *blob, int lb_size, int rfix_pos, int max_rfix_pos, uint32_t lb_num);
+//void udf_fixup_internal_extattr(uint8_t *blob, uint32_t lb_num);
+//void udf_fixup_node_internals(struct udf_mount *ump, uint8_t *blob, int udf_c_type);
+
+/* device strategy */
+//void udf_discstrat_init(struct udf_mount *ump);
+//void udf_discstrat_finish(struct udf_mount *ump);
+//void udf_discstrat_queuebuf(struct udf_mount *ump, struct buf *nestbuf);
+
+/* structure writers */
+//int udf_write_terminator(struct udf_mount *ump, uint32_t sector);
+
+/* structure creators */
+//void udf_inittag(struct udf_mount *ump, struct desc_tag *tag, int tagid, uint32_t sector);
+//void udf_set_regid(struct regid *regid, char const *name);
+//void udf_add_domain_regid(struct udf_mount *ump, struct regid *regid);
+//void udf_add_udf_regid(struct udf_mount *ump, struct regid *regid);
+//void udf_add_impl_regid(struct udf_mount *ump, struct regid *regid);
+//void udf_add_app_regid(struct udf_mount *ump, struct regid *regid);
+
+/* directory operations and helpers */
+void udf_osta_charset(struct charspec *charspec);
+int udf_read_fid_stream(struct vnode *vp, uint64_t *offset, struct fileid_desc *fid);
+int udf_lookup_name_in_dir(struct vnode *vp, const char *name, int namelen, struct long_ad *icb_loc, int *found);
+//int udf_create_node(struct vnode *dvp, struct vnode **vpp, struct vattr *vap, struct componentname *cnp);
+//void udf_delete_node(struct udf_node *udf_node);
+
+//int udf_chsize(struct vnode *vp, u_quad_t newsize, kauth_cred_t cred);
+//int udf_dir_detach(struct udf_mount *ump, struct udf_node *dir_node, struct udf_node *udf_node, struct componentname *cnp);
+//int udf_dir_attach(struct udf_mount *ump, struct udf_node *dir_node, struct udf_node *udf_node, struct vattr *vap, struct componentname *cnp);
+//int udf_dir_update_rootentry(struct udf_mount *ump, struct udf_node *dir_node, struct udf_node *new_parent_node);
+
+/* update and times */
+//void udf_add_to_dirtylist(struct udf_node *udf_node);
+//void udf_remove_from_dirtylist(struct udf_node *udf_node);
+//void udf_itimes(struct udf_node *udf_node, struct timespec *acc,
+// struct timespec *mod, struct timespec *birth);
+//int udf_update(struct vnode *node, struct timespec *acc,
+// struct timespec *mod, struct timespec *birth, int updflags);
+
+/* helpers and converters */
+//void udf_init_nodes_tree(struct udf_mount *ump);
+long udf_get_node_id(const struct long_ad *icbptr); /* for `inode' numbering */
+int udf_get_node_longad(const ino_t ino, struct long_ad *icbptr);
+//int udf_compare_icb(const struct long_ad *a, const struct long_ad *b);
+uint32_t udf_getaccessmode(struct udf_node *node);
+//void udf_setaccessmode(struct udf_node *udf_node, mode_t mode);
+//void udf_getownership(struct udf_node *udf_node, uid_t *uidp, gid_t *gidp);
+//void udf_setownership(struct udf_node *udf_node, uid_t uid, gid_t gid);
+
+void udf_to_unix_name(struct udf_mount *ump, char *result, int result_len, char *id, int len);
+//void unix_to_udf_name(char *result, uint8_t *result_len, char const *name, int name_len, struct charspec *chsp);
+
+void udf_timestamp_to_timespec(struct udf_mount *ump, struct timestamp *timestamp, struct timespec *timespec);
+//void udf_timespec_to_timestamp(struct timespec *timespec, struct timestamp *timestamp);
+
+/* vnode operations */
+int udf_getanode(struct mount *mp, struct vnode **vpp);
+int udf_read_internal(struct udf_node *node, uint8_t *blob);
+
+/* Created for testing */
+void udf_print_anchors(struct udf_mount *ump);
+struct udf_node * udf_alloc_node(void);
+void udf_free_node(struct udf_node *unode);
+int udf_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp);
+int udf_read_node(struct udf_node *unode, uint8_t *blob, off_t start, int length);
+#endif /* !_FS_UDF_UDF_SUBR_H_ */
Index: sys/fs/udf2/udf_subr.c
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf_subr.c
@@ -0,0 +1,6742 @@
+/*-
+ * Copyright (c) 2012 Oleksandr Dudinskyi
+ * Copyright (c) 2012 Will DeVries
+ * Copyright (c) 2006, 2008 Reinoud Zandijk
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+
+#include <machine/_inttypes.h>
+#include <sys/cdefs.h>
+#include <sys/endian.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/vnode.h>
+#include <sys/malloc.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <sys/iconv.h>
+#include "ecma167-udf.h"
+#include "udf.h"
+#include "udf_subr.h"
+#include "udf_mount.h"
+#include "udfio.h"
+
+#define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
+
+#define UDF_SET_SYSTEMFILE(vp) do {		\
+	/* XXXAD Is the vnode locked? */	\
+	(vp)->v_vflag |= VV_SYSTEM;		\
+	vref(vp);				\
+	vput(vp);				\
+} while (0)
+
+extern struct iconv_functions *udf2_iconv;
+extern int syncer_maxdelay; /* maximum delay time */
+extern int (**udf_vnodeop_p)(void *);
+
+/* --------------------------------------------------------------------- */
+
+#if 1
+
+static void
+udf_dump_discinfo(struct udf_mount *ump)
+{
+ char bits[128];
+ struct mmc_discinfo *di = &ump->discinfo;
+
+ if ((udf_verbose & UDF_DEBUG_VOLUMES) == 0)
+ return;
+
+ printf("Device/media info :\n");
+ printf("\tMMC profile 0x%02x\n", di->mmc_profile);
+ printf("\tderived class %d\n", di->mmc_class);
+ printf("\tsector size %d\n", di->sector_size);
+ printf("\tdisc state %d\n", di->disc_state);
+ printf("\tlast ses state %d\n", di->last_session_state);
+ printf("\tbg format state %d\n", di->bg_format_state);
+ printf("\tfrst track %d\n", di->first_track);
+ printf("\tfst on last ses %d\n", di->first_track_last_session);
+ printf("\tlst on last ses %d\n", di->last_track_last_session);
+ printf("\tlink block penalty %d\n", di->link_block_penalty);
+ snprintf(bits, sizeof(bits), "%b\n", di->disc_flags, MMC_DFLAGS_FLAGBITS);
+ printf("\tdisc flags %s\n", bits);
+ printf("\tdisc id %x\n", di->disc_id);
+ printf("\tdisc barcode %"PRIx64"\n", di->disc_barcode);
+
+ printf("\tnum sessions %d\n", di->num_sessions);
+ printf("\tnum tracks %d\n", di->num_tracks);
+
+ snprintf(bits, sizeof(bits), "%b\n", (int) di->mmc_cur, MMC_CAP_FLAGBITS);
+ printf("\tcapabilities cur %s\n", bits);
+ snprintf(bits, sizeof(bits), "%b\n", (int) di->mmc_cap, MMC_CAP_FLAGBITS);
+ printf("\tcapabilities cap %s\n", bits);
+}
+
+#if 0
+static void
+udf_dump_trackinfo(struct mmc_trackinfo *trackinfo)
+{
+ char bits[128];
+
+ if ((udf_verbose & UDF_DEBUG_VOLUMES) == 0)
+ return;
+
+ printf("Trackinfo for track %d:\n", trackinfo->tracknr);
+ printf("\tsessionnr %d\n", trackinfo->sessionnr);
+ printf("\ttrack mode %d\n", trackinfo->track_mode);
+ printf("\tdata mode %d\n", trackinfo->data_mode);
+ snprintf(bits, sizeof(bits), "%b\n", trackinfo->flags, MMC_TRACKINFO_FLAGBITS);
+ printf("\tflags %s\n", bits);
+
+ printf("\ttrack start %d\n", trackinfo->track_start);
+ printf("\tnext_writable %d\n", trackinfo->next_writable);
+ printf("\tfree_blocks %d\n", trackinfo->free_blocks);
+ printf("\tpacket_size %d\n", trackinfo->packet_size);
+ printf("\ttrack size %d\n", trackinfo->track_size);
+ printf("\tlast recorded block %d\n", trackinfo->last_recorded);
+}
+
+#endif
+#else
+#define udf_dump_discinfo(a);
+#define udf_dump_trackinfo(a);
+#endif
+
+
+/* --------------------------------------------------------------------- */
+
+/* not called often */
+int
+udf_update_discinfo(struct udf_mount *ump, uint32_t sector_size, uint32_t psize)
+{
+ struct vnode *devvp = ump->devvp;
+ struct thread *td;
+ struct mmc_discinfo *di;
+ int error;
+
+ DPRINTF(VOLUMES, ("read/update disc info\n"));
+ di = &ump->discinfo;
+ td = curthread;
+ memset(di, 0, sizeof(struct mmc_discinfo));
+
+	/* check if we're on an MMC capable device, i.e. CD/DVD */
+ error = VOP_IOCTL(devvp, MMCGETDISCINFO, di, FKIOCTL, NOCRED, td);
+ if (error == 0) {
+ udf_dump_discinfo(ump);
+ return (0);
+ }
+
+ /* set up a disc info profile for partitions */
+ di->mmc_profile = 0x01; /* disc type */
+ di->mmc_class = MMC_CLASS_DISC;
+ di->disc_state = MMC_STATE_CLOSED;
+ di->last_session_state = MMC_STATE_CLOSED;
+ di->bg_format_state = MMC_BGFSTATE_COMPLETED;
+ di->link_block_penalty = 0;
+
+ di->mmc_cur = MMC_CAP_RECORDABLE | MMC_CAP_REWRITABLE |
+ MMC_CAP_ZEROLINKBLK | MMC_CAP_HW_DEFECTFREE;
+ di->mmc_cap = di->mmc_cur;
+ di->disc_flags = MMC_DFLAGS_UNRESTRICTED;
+
+ /* TODO problem with last_possible_lba on resizable VND; request */
+ di->last_possible_lba = psize/sector_size - 1;
+ di->sector_size = sector_size;
+
+ di->num_sessions = 1;
+ di->num_tracks = 1;
+
+ di->first_track = 1;
+ di->first_track_last_session = di->last_track_last_session = 1;
+
+ udf_dump_discinfo(ump);
+ return (0);
+}
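+
+/*
+ * Worked example (illustrative): for a 700 MB partition image with 2048 byte
+ * sectors, psize/sector_size - 1 = 734003200/2048 - 1 = 358399, so the fake
+ * disc info above reports sectors 0..358399 as one closed track and session.
+ */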
+
+
+int
+udf_update_trackinfo(struct udf_mount *ump, struct mmc_trackinfo *ti)
+{
+ struct vnode *devvp = ump->devvp;
+ struct mmc_discinfo *di = &ump->discinfo;
+ struct thread *td;
+ int error, class;
+
+ DPRINTF(VOLUMES, ("read track info\n"));
+
+ td = curthread;
+ class = di->mmc_class;
+ if (class != MMC_CLASS_DISC) {
+ /* tracknr specified in struct ti */
+ error = VOP_IOCTL(devvp, MMCGETTRACKINFO, ti, FKIOCTL, NOCRED, td);
+ return (error);
+ }
+
+ /* disc partition support */
+ if (ti->tracknr != 1)
+ return (EIO);
+
+ /* create fake ti (TODO check for resized vnds) */
+ ti->sessionnr = 1;
+
+ ti->track_mode = 0; /* XXX */
+ ti->data_mode = 0; /* XXX */
+ ti->flags = MMC_TRACKINFO_LRA_VALID | MMC_TRACKINFO_NWA_VALID;
+
+ ti->track_start = 0;
+ ti->packet_size = 1;
+
+ /* TODO support for resizable vnd */
+ ti->track_size = di->last_possible_lba;
+ ti->next_writable = di->last_possible_lba;
+ ti->last_recorded = ti->next_writable;
+ ti->free_blocks = 0;
+
+ return (0);
+}
+
+#if 0
+int
+udf_setup_writeparams(struct udf_mount *ump)
+{
+ struct mmc_writeparams mmc_writeparams;
+ int error;
+
+ if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
+ return (0);
+
+ /*
+ * only CD burning normally needs setting up, but other disc types
+ * might need other settings to be made. The MMC framework will set up
+ * the necessary recording parameters according to the disc
+ * characteristics read in. Modifications can be made in the discinfo
+ * structure passed to change the nature of the disc.
+ */
+
+ memset(&mmc_writeparams, 0, sizeof(struct mmc_writeparams));
+ mmc_writeparams.mmc_class = ump->discinfo.mmc_class;
+ mmc_writeparams.mmc_cur = ump->discinfo.mmc_cur;
+
+ /*
+ * UDF dictates first track to determine track mode for the whole
+ * disc. [UDF 1.50/6.10.1.1, UDF 1.50/6.10.2.1]
+ * To prevent problems with a `reserved' track in front we start with
+ * the 2nd track and if that is not valid, go for the 1st.
+ */
+ mmc_writeparams.tracknr = 2;
+ mmc_writeparams.data_mode = MMC_DATAMODE_DEFAULT; /* XA disc */
+ mmc_writeparams.track_mode = MMC_TRACKMODE_DEFAULT; /* data */
+
+ error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS, &mmc_writeparams,
+ FKIOCTL, NOCRED);
+ if (error) {
+ mmc_writeparams.tracknr = 1;
+ error = VOP_IOCTL(ump->devvp, MMCSETUPWRITEPARAMS,
+ &mmc_writeparams, FKIOCTL, NOCRED);
+ }
+ return (error);
+}
+
+
+#endif
+int
+udf_synchronise_caches(struct udf_mount *ump)
+{
+ struct mmc_op mmc_op;
+
+ DPRINTF(CALL, ("udf_synchronise_caches()\n"));
+
+ if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
+ return (0);
+
+ /* discs are done now */
+ if (ump->discinfo.mmc_class == MMC_CLASS_DISC)
+ return (0);
+
+ memset(&mmc_op, 0, sizeof(struct mmc_op));
+ mmc_op.operation = MMC_OP_SYNCHRONISECACHE;
+
+ /* ignore return code */
+ (void) VOP_IOCTL(ump->devvp, MMCOP, &mmc_op, FKIOCTL, NOCRED, curthread);
+
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+/* track/session searching for mounting */
+int
+udf_search_tracks(struct udf_mount *ump, struct udf_args *args,
+ int *first_tracknr, int *last_tracknr)
+{
+ struct mmc_trackinfo trackinfo;
+ uint32_t tracknr, start_track, num_tracks;
+ int error;
+
+ /* if negative, sessionnr is relative to last session */
+ if (args->sessionnr < 0) {
+ args->sessionnr += ump->discinfo.num_sessions;
+ }
+
+ /* sanity */
+ if (args->sessionnr < 0)
+ args->sessionnr = 0;
+ if (args->sessionnr > ump->discinfo.num_sessions)
+ args->sessionnr = ump->discinfo.num_sessions;
+
+ /* search the tracks for this session, zero session nr indicates last */
+ if (args->sessionnr == 0)
+ args->sessionnr = ump->discinfo.num_sessions;
+ if (ump->discinfo.last_session_state == MMC_STATE_EMPTY)
+ args->sessionnr--;
+
+ /* sanity again */
+ if (args->sessionnr < 0)
+ args->sessionnr = 0;
+
+ /* search the first and last track of the specified session */
+ num_tracks = ump->discinfo.num_tracks;
+ start_track = ump->discinfo.first_track;
+
+ /* search for first track of this session */
+ for (tracknr = start_track; tracknr <= num_tracks; tracknr++) {
+ /* get track info */
+ trackinfo.tracknr = tracknr;
+ error = udf_update_trackinfo(ump, &trackinfo);
+ if (error)
+ return (error);
+
+ if (trackinfo.sessionnr == args->sessionnr)
+ break;
+ }
+ *first_tracknr = tracknr;
+
+ /* search for last track of this session */
+ for (;tracknr <= num_tracks; tracknr++) {
+ /* get track info */
+ trackinfo.tracknr = tracknr;
+ error = udf_update_trackinfo(ump, &trackinfo);
+ if (error || (trackinfo.sessionnr != args->sessionnr)) {
+ tracknr--;
+ break;
+ }
+ }
+ if (tracknr > num_tracks)
+ tracknr--;
+
+ *last_tracknr = tracknr;
+
+ if (*last_tracknr < *first_tracknr) {
+ printf( "udf_search_tracks: sanity check on drive+disc failed, "
+ "drive returned garbage\n");
+ return (EINVAL);
+ }
+
+ KASSERT(*last_tracknr >= *first_tracknr, ("udf_search_tracks: sanity check failed"));
+ return (0);
+}
+
+#if 0
+/*
+ * NOTE: this is the only routine in this file that directly peeks into the
+ * metadata file, but since it is at a larval state of the mount it can't hurt.
+ *
+ * XXX candidate for udf_allocation.c
+ * XXX clean me up!, change to new node reading code.
+ */
+
+static void
+udf_check_track_metadata_overlap(struct udf_mount *ump,
+ struct mmc_trackinfo *trackinfo)
+{
+ struct part_desc *part;
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct short_ad *s_ad;
+ struct long_ad *l_ad;
+ uint32_t track_start, track_end;
+ uint32_t phys_part_start, phys_part_end, part_start, part_end;
+ uint32_t sector_size, len, alloclen, plb_num;
+ uint8_t *pos;
+ int addr_type, icblen, icbflags, flags;
+
+ /* get our track extents */
+ track_start = trackinfo->track_start;
+ track_end = track_start + trackinfo->track_size;
+
+ /* get our base partition extent */
+ KASSERT(ump->node_part == ump->fids_part);
+ part = ump->partitions[ump->node_part];
+ phys_part_start = le32toh(part->start_loc);
+ phys_part_end = phys_part_start + le32toh(part->part_len);
+
+ /* no use if its outside the physical partition */
+ if ((phys_part_start >= track_end) || (phys_part_end < track_start))
+ return;
+
+ /*
+ * now follow all extents in the fe/efe to see if they refer to this
+ * track
+ */
+
+ sector_size = ump->discinfo.sector_size;
+
+ /* XXX should we claim exclusive access to the metafile ? */
+ /* TODO: move to new node read code */
+ fe = ump->metadata_node->fe;
+ efe = ump->metadata_node->efe;
+ if (fe) {
+ alloclen = le32toh(fe->l_ad);
+ pos = &fe->data[0] + le32toh(fe->l_ea);
+ icbflags = le16toh(fe->icbtag.flags);
+ } else {
+ assert(efe);
+ alloclen = le32toh(efe->l_ad);
+ pos = &efe->data[0] + le32toh(efe->l_ea);
+ icbflags = le16toh(efe->icbtag.flags);
+ }
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ while (alloclen) {
+ if (addr_type == UDF_ICB_SHORT_ALLOC) {
+ icblen = sizeof(struct short_ad);
+ s_ad = (struct short_ad *) pos;
+ len = le32toh(s_ad->len);
+ plb_num = le32toh(s_ad->lb_num);
+ } else {
+ /* should not be present, but why not */
+ icblen = sizeof(struct long_ad);
+ l_ad = (struct long_ad *) pos;
+ len = le32toh(l_ad->len);
+ plb_num = le32toh(l_ad->loc.lb_num);
+ /* pvpart_num = le16toh(l_ad->loc.part_num); */
+ }
+ /* process extent */
+ flags = UDF_EXT_FLAGS(len);
+ len = UDF_EXT_LEN(len);
+
+ part_start = phys_part_start + plb_num;
+ part_end = part_start + (len / sector_size);
+
+ if ((part_start >= track_start) && (part_end <= track_end)) {
+ /* extent is enclosed within this track */
+ ump->metadata_track = *trackinfo;
+ return;
+ }
+
+ pos += icblen;
+ alloclen -= icblen;
+ }
+}
+
+
+int
+udf_search_writing_tracks(struct udf_mount *ump)
+{
+ struct vnode *devvp = ump->devvp;
+ struct mmc_trackinfo trackinfo;
+ struct mmc_op mmc_op;
+ struct part_desc *part;
+ uint32_t tracknr, start_track, num_tracks;
+ uint32_t track_start, track_end, part_start, part_end;
+ int node_alloc, error;
+
+ /*
+ * in the CD/(HD)DVD/BD recordable device model a few tracks within
+ * the last session might be open but in the UDF device model at most
+ * three tracks can be open: a reserved track for delayed ISO VRS
+ * writing, a data track and a metadata track. We search here for the
+ * data track and the metadata track. Note that the reserved track is
+ * troublesome but can be detected by its small size of < 512 sectors.
+ */
+
+ /* update discinfo since it might have changed */
+ error = udf_update_discinfo(ump);
+ if (error)
+ return (error);
+
+ num_tracks = ump->discinfo.num_tracks;
+ start_track = ump->discinfo.first_track;
+
+ /* fetch info on first and possibly only track */
+ trackinfo.tracknr = start_track;
+ error = udf_update_trackinfo(ump, &trackinfo);
+ if (error)
+ return (error);
+
+ /* copy results to our mount point */
+ ump->data_track = trackinfo;
+ ump->metadata_track = trackinfo;
+
+ /* if not sequential, we're done */
+ if (num_tracks == 1)
+ return (0);
+
+ for (tracknr = start_track;tracknr <= num_tracks; tracknr++) {
+ /* get track info */
+ trackinfo.tracknr = tracknr;
+ error = udf_update_trackinfo(ump, &trackinfo);
+ if (error)
+ return (error);
+
+ /*
+ * If this track is marked damaged, ask for repair. This is an
+ * optional command, so ignore its error but report warning.
+ */
+ if (trackinfo.flags & MMC_TRACKINFO_DAMAGED) {
+ memset(&mmc_op, 0, sizeof(mmc_op));
+ mmc_op.operation = MMC_OP_REPAIRTRACK;
+ mmc_op.mmc_profile = ump->discinfo.mmc_profile;
+ mmc_op.tracknr = tracknr;
+ error = VOP_IOCTL(devvp, MMCOP, &mmc_op, FKIOCTL, NOCRED);
+ if (error)
+ (void)printf("Drive can't explicitly repair "
+ "damaged track %d, but it might "
+ "autorepair\n", tracknr);
+
+ /* reget track info */
+ error = udf_update_trackinfo(ump, &trackinfo);
+ if (error)
+ return (error);
+ }
+ if ((trackinfo.flags & MMC_TRACKINFO_NWA_VALID) == 0)
+ continue;
+
+ track_start = trackinfo.track_start;
+ track_end = track_start + trackinfo.track_size;
+
+ /* check for overlap on data partition */
+ part = ump->partitions[ump->data_part];
+ part_start = le32toh(part->start_loc);
+ part_end = part_start + le32toh(part->part_len);
+ if ((part_start < track_end) && (part_end > track_start)) {
+ ump->data_track = trackinfo;
+ /* TODO check if UDF partition data_part is writable */
+ }
+
+ /* check for overlap on metadata partition */
+ node_alloc = ump->vtop_alloc[ump->node_part];
+ if ((node_alloc == UDF_ALLOC_METASEQUENTIAL) ||
+ (node_alloc == UDF_ALLOC_METABITMAP)) {
+ udf_check_track_metadata_overlap(ump, &trackinfo);
+ } else {
+ ump->metadata_track = trackinfo;
+ }
+ }
+
+ if ((ump->data_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
+ return (EROFS);
+
+ if ((ump->metadata_track.flags & MMC_TRACKINFO_NWA_VALID) == 0)
+ return (EROFS);
+
+ return (0);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Check if the blob starts with a good UDF tag. Tags are protected by a
+ * checksum over the header except for the byte at position 4, which is the
+ * checksum itself.
+ */
+
+int
+udf_check_tag(void *blob)
+{
+ struct desc_tag *tag = blob;
+ uint8_t *pos, sum, cnt;
+
+ /* check TAG header checksum */
+ pos = (uint8_t *) tag;
+ sum = 0;
+
+	for (cnt = 0; cnt < 16; cnt++) {
+ if (cnt != 4)
+ sum += *pos;
+ pos++;
+ }
+ if (sum != tag->cksum) {
+ /* bad tag header checksum; this is not a valid tag */
+ return (EINVAL);
+ }
+
+ return (0);
+}
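+
+/*
+ * Worked example (illustrative): a tag whose 16 byte header contains only
+ * two nonzero bytes besides the checksum, 0x02 at offset 0 and 0x31 at
+ * offset 8, must carry 0x02 + 0x31 = 0x33 in the cksum byte at offset 4 to
+ * be accepted by udf_check_tag() above.
+ */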
+
+
+/*
+ * udf_check_tag_payload() checks the descriptor CRC as specified. It returns
+ * EIO if the descriptor CRC area is longer than max_length and EINVAL if the
+ * CRC does not match.
+ */
+
+int
+udf_check_tag_payload(void *blob, uint32_t max_length)
+{
+ struct desc_tag *tag = blob;
+ uint16_t crc, crc_len;
+
+ crc_len = le16toh(tag->desc_crc_len);
+
+ /* check payload CRC if applicable */
+ if (crc_len == 0)
+ return (0);
+
+ if (crc_len > max_length)
+ return (EIO);
+
+ crc = udf_cksum(((uint8_t *) tag) + UDF_DESC_TAG_LENGTH, crc_len);
+ if (crc != le16toh(tag->desc_crc)) {
+ /* bad payload CRC; this is a broken tag */
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+
+void
+udf_validate_tag_sum(void *blob)
+{
+ struct desc_tag *tag = blob;
+ uint8_t *pos, sum, cnt;
+
+ /* calculate TAG header checksum */
+ pos = (uint8_t *) tag;
+ sum = 0;
+
+	for (cnt = 0; cnt < 16; cnt++) {
+		if (cnt != 4)
+			sum += *pos;
+		pos++;
+	}
+ tag->cksum = sum; /* 8 bit */
+}
+
+
+/* assumes sector number of descriptor to be saved already present */
+void
+udf_validate_tag_and_crc_sums(void *blob)
+{
+ struct desc_tag *tag = blob;
+ uint16_t crc, crc_len;
+ uint8_t *btag = (uint8_t *) tag;
+
+ crc_len = le16toh(tag->desc_crc_len);
+
+ /* check payload CRC if applicable */
+ if (crc_len > 0) {
+ crc = udf_cksum(btag + UDF_DESC_TAG_LENGTH, crc_len);
+ tag->desc_crc = htole16(crc);
+ }
+
+ /* calculate TAG header checksum */
+ udf_validate_tag_sum(blob);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * XXX note the different semantics from udfclient: for FIDs it still rounds
+ * up to sectors. Use udf_fidsize() for a correct length.
+ */
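+
+/*
+ * Worked example (illustrative, assuming UDF_FID_SIZE is the 38 byte fixed
+ * FID header from ECMA-167): a FID with a 5 byte file identifier and no
+ * implementation use field is 38 + 5 = 43 bytes; udf_fidsize() below rounds
+ * that up to 44 via (size + 3) & ~3, whereas udf_tagsize() rounds the same
+ * FID up to a whole logical sector, e.g. 2048 bytes.
+ */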
+
+int
+udf_tagsize(union dscrptr *dscr, uint32_t lb_size)
+{
+ uint32_t size, tag_id, num_lb, elmsz;
+
+ tag_id = le16toh(dscr->tag.id);
+
+ switch (tag_id) {
+ case TAGID_LOGVOL :
+ size = sizeof(struct logvol_desc) - 1;
+ size += le32toh(dscr->lvd.mt_l);
+ break;
+ case TAGID_UNALLOC_SPACE :
+ elmsz = sizeof(struct extent_ad);
+ size = sizeof(struct unalloc_sp_desc) - elmsz;
+ size += le32toh(dscr->usd.alloc_desc_num) * elmsz;
+ break;
+ case TAGID_FID :
+ size = UDF_FID_SIZE + dscr->fid.l_fi + le16toh(dscr->fid.l_iu);
+ size = (size + 3) & ~3;
+ break;
+ case TAGID_LOGVOL_INTEGRITY :
+ size = sizeof(struct logvol_int_desc) - sizeof(uint32_t);
+ size += le32toh(dscr->lvid.l_iu);
+ size += (2 * le32toh(dscr->lvid.num_part) * sizeof(uint32_t));
+ break;
+ case TAGID_SPACE_BITMAP :
+ size = sizeof(struct space_bitmap_desc) - 1;
+ size += le32toh(dscr->sbd.num_bytes);
+ break;
+ case TAGID_SPARING_TABLE :
+ elmsz = sizeof(struct spare_map_entry);
+ size = sizeof(struct udf_sparing_table) - elmsz;
+ size += le16toh(dscr->spt.rt_l) * elmsz;
+ break;
+ case TAGID_FENTRY :
+ size = sizeof(struct file_entry);
+ size += le32toh(dscr->fe.l_ea) + le32toh(dscr->fe.l_ad)-1;
+ break;
+ case TAGID_EXTFENTRY :
+ size = sizeof(struct extfile_entry);
+ size += le32toh(dscr->efe.l_ea) + le32toh(dscr->efe.l_ad)-1;
+ break;
+ case TAGID_FSD :
+ size = sizeof(struct fileset_desc);
+ break;
+ default :
+ size = sizeof(union dscrptr);
+ break;
+ }
+
+ if ((size == 0) || (lb_size == 0))
+ return (0);
+
+ if (lb_size == 1)
+ return (size);
+
+ /* round up in sectors */
+ num_lb = (size + lb_size -1) / lb_size;
+ return (num_lb * lb_size);
+}
+
+
+int
+udf_fidsize(struct fileid_desc *fid)
+{
+ uint32_t size;
+
+ if (le16toh(fid->tag.id) != TAGID_FID)
+ panic("got udf_fidsize on non FID\n");
+
+ size = UDF_FID_SIZE + fid->l_fi + le16toh(fid->l_iu);
+ size = (size + 3) & ~3;
+
+ return (size);
+}
+
+/* --------------------------------------------------------------------- */
+
+void
+udf_lock_node(struct udf_node *udf_node, int flag, char const *fname, const int lineno)
+{
+#if 0
+ int ret;
+
+ mutex_enter(&udf_node->node_mutex);
+ /* wait until free */
+ while (udf_node->i_flags & IN_LOCKED) {
+ ret = cv_timedwait(&udf_node->node_lock, &udf_node->node_mutex, hz/8);
+ /* TODO check if we should return error; abort */
+ if (ret == EWOULDBLOCK) {
+ DPRINTF(LOCKING, ( "udf_lock_node: udf_node %p would block "
+ "wanted at %s:%d, previously locked at %s:%d\n",
+ udf_node, fname, lineno,
+ udf_node->lock_fname, udf_node->lock_lineno));
+ }
+ }
+ /* grab */
+ udf_node->i_flags |= IN_LOCKED | flag;
+ /* debug */
+ udf_node->lock_fname = fname;
+ udf_node->lock_lineno = lineno;
+
+ mutex_exit(&udf_node->node_mutex);
+#endif
+}
+
+
+void
+udf_unlock_node(struct udf_node *udf_node, int flag)
+{
+#if 0
+ mutex_enter(&udf_node->node_mutex);
+ udf_node->i_flags &= ~(IN_LOCKED | flag);
+ cv_broadcast(&udf_node->node_lock);
+ mutex_exit(&udf_node->node_mutex);
+#endif
+}
+
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_read_anchor(struct udf_mount *ump, uint32_t sector, struct anchor_vdp **dst)
+{
+ int error;
+
+ error = udf_read_phys_dscr(ump, sector, M_UDFTEMP,
+ (union dscrptr **) dst);
+ if (!error) {
+ /* blank terminator blocks are not allowed here */
+ if (*dst == NULL)
+ return (ENOENT);
+ if (le16toh((*dst)->tag.id) != TAGID_ANCHOR) {
+ error = ENOENT;
+ free(*dst, M_UDFTEMP);
+ *dst = NULL;
+ }
+ }
+
+ return (error);
+}
+
+void
+udf_print_anchors(struct udf_mount *ump)
+{
+ struct anchor_vdp *a;
+ int i = 0;
+
+	for (i = 0; i < UDF_ANCHORS && ump->anchors[i] != NULL; i++)
+		;
+
+ printf("Number of anchors: %d\n", i);
+
+ for (i = 0; i < UDF_ANCHORS && ump->anchors[i] != NULL; i++) {
+ a = ump->anchors[i];
+ printf("\tTag ID: %d\n",a->tag.id);
+ printf("\tDescriptor Version: %d\n",a->tag.descriptor_ver);
+ printf("\tTag Serial number: %d\n", a->tag.serial_num);
+ printf("\tTag Location: %d\n", a->tag.tag_loc);
+
+ printf("\tmain volumn descriptor seq extent: %d, len: %d\n",
+ a->main_vds_ex.loc, a->main_vds_ex.len);
+
+ printf("\tmain volumn descriptor seq extent: %d, len: %d\n",
+ a->reserve_vds_ex.loc, a->reserve_vds_ex.len);
+ }
+}
+
+#if 0
+int
+udf_last_session_info(struct udf_mount *ump)
+{
+ struct udf_args *args = &ump->mount_args;
+ struct mmc_trackinfo first_track;
+ struct mmc_trackinfo second_track;
+ struct mmc_trackinfo last_track;
+ uint32_t track_start;
+ uint32_t track_end;
+ int first_tracknr, last_tracknr;
+ int error;
+
+ /* search the first and last track of the specified session */
+ error = udf_search_tracks(ump, args, &first_tracknr, &last_tracknr);
+ if (!error) {
+ first_track.tracknr = first_tracknr;
+ error = udf_update_trackinfo(ump, &first_track);
+ }
+ if (!error) {
+ last_track.tracknr = last_tracknr;
+ error = udf_update_trackinfo(ump, &last_track);
+ }
+ if ((!error) && (first_tracknr != last_tracknr)) {
+ second_track.tracknr = first_tracknr+1;
+ error = udf_update_trackinfo(ump, &second_track);
+ }
+ if (error) {
+ printf("UDF mount: reading disc geometry failed\n");
+ return (0);
+ }
+
+ track_start = first_track.track_start;
+
+	/* `end' is not as straightforward as start. */
+ track_end = last_track.track_start
+ + last_track.track_size - last_track.free_blocks - 1;
+
+ if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
+		/* end of track is not straightforward here */
+ if (last_track.flags & MMC_TRACKINFO_LRA_VALID)
+ track_end = last_track.last_recorded;
+ else if (last_track.flags & MMC_TRACKINFO_NWA_VALID)
+ track_end = last_track.next_writable
+ - ump->discinfo.link_block_penalty;
+ }
+
+	/* it's no use reading a blank track */
+ if (first_track.flags & MMC_TRACKINFO_BLANK)
+ ump->first_trackblank = 1;
+
+ /* get our packet size */
+ ump->packet_size = first_track.packet_size;
+ if (first_track.flags & MMC_TRACKINFO_BLANK)
+ ump->packet_size = second_track.packet_size;
+
+ if (ump->packet_size <= 1) {
+ /* take max, but not bigger than 64 */
+ ump->packet_size = MAXPHYS / ump->discinfo.sector_size;
+ ump->packet_size = MIN(ump->packet_size, 64);
+ }
+	KASSERT(ump->packet_size >= 1, ("udf_last_session_info: packet size is less than one"));
+
+ /* VATs are only recorded on sequential media, but initialise */
+ ump->first_possible_vat_location = track_start + 2;
+ ump->last_possible_vat_location = track_end + last_track.packet_size;
+ ump->session_start = track_start;
+	ump->session_end = track_end;
+
+	return (0);
+}
+#endif
+
+int
+udf_read_anchors(struct udf_mount *ump)
+{
+ struct udf_args *args = &ump->mount_args;
+ struct mmc_trackinfo first_track;
+ struct mmc_trackinfo second_track;
+ struct mmc_trackinfo last_track;
+ struct anchor_vdp **anchorsp;
+ uint32_t track_start;
+ uint32_t track_end;
+ uint32_t positions[4];
+ int first_tracknr, last_tracknr;
+ int error, anch, ok, first_anchor;
+
+ /* search the first and last track of the specified session */
+ error = udf_search_tracks(ump, args, &first_tracknr, &last_tracknr);
+ if (!error) {
+ first_track.tracknr = first_tracknr;
+ error = udf_update_trackinfo(ump, &first_track);
+ }
+ if (!error) {
+ last_track.tracknr = last_tracknr;
+ error = udf_update_trackinfo(ump, &last_track);
+ }
+ if ((!error) && (first_tracknr != last_tracknr)) {
+ second_track.tracknr = first_tracknr+1;
+ error = udf_update_trackinfo(ump, &second_track);
+ }
+ if (error) {
+ printf("UDF mount: reading disc geometry failed\n");
+ return (0);
+ }
+
+ track_start = first_track.track_start;
+
+	/* `end' is not as straightforward as start. */
+ track_end = last_track.track_start
+ + last_track.track_size - last_track.free_blocks - 1;
+
+ if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
+		/* end of track is not straightforward here */
+ if (last_track.flags & MMC_TRACKINFO_LRA_VALID)
+ track_end = last_track.last_recorded;
+ else if (last_track.flags & MMC_TRACKINFO_NWA_VALID)
+ track_end = last_track.next_writable
+ - ump->discinfo.link_block_penalty;
+ }
+
+	/* it's no use reading a blank track */
+ first_anchor = 0;
+ if (first_track.flags & MMC_TRACKINFO_BLANK)
+ first_anchor = 1;
+
+ /* get our packet size */
+ ump->packet_size = first_track.packet_size;
+ if (first_track.flags & MMC_TRACKINFO_BLANK)
+ ump->packet_size = second_track.packet_size;
+
+ if (ump->packet_size <= 1) {
+ /* take max, but not bigger than 64 */
+ ump->packet_size = MAXPHYS / ump->discinfo.sector_size;
+ ump->packet_size = MIN(ump->packet_size, 64);
+ }
+ KASSERT(ump->packet_size >= 1, ("udf_read_anchors: packet size is less than one"));
+
+ /* read anchors start+256, start+512, end-256, end */
+ positions[0] = track_start+256;
+ positions[1] = track_end-256;
+ positions[2] = track_end;
+ positions[3] = track_start+512; /* [UDF 2.60/6.11.2] */
+ /* XXX shouldn't +512 be preferred above +256 for compat with Roxio CD */
+
+ ok = 0;
+ anchorsp = ump->anchors;
+ for (anch = first_anchor; anch < 4; anch++) {
+ DPRINTF(VOLUMES, ("Read anchor %d at sector %d\n", anch,
+ positions[anch]));
+ error = udf_read_anchor(ump, positions[anch], anchorsp);
+ if (!error) {
+ anchorsp++;
+ ok++;
+ }
+ }
+
+ /* VATs are only recorded on sequential media, but initialise */
+ ump->first_possible_vat_location = track_start + 2;
+ ump->last_possible_vat_location = track_end + last_track.packet_size;
+
+ return (ok);
+}
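+
+/*
+ * Worked example (illustrative): for a closed single session disc whose only
+ * track spans sectors 0..2295103, the loop above probes the anchor volume
+ * descriptor pointer at sectors 256, 2294847 (end-256), 2295103 (end) and
+ * 512, accepting any subset that reads back with a valid ANCHOR tag.
+ */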
+
+/* --------------------------------------------------------------------- */
+#if 0
+int
+udf_get_c_type(struct udf_node *udf_node)
+{
+ int isdir, what;
+
+ isdir = (udf_node->vnode->v_type == VDIR);
+ what = isdir ? UDF_C_FIDS : UDF_C_USERDATA;
+
+ if (udf_node->ump)
+ if (udf_node == udf_node->ump->metadatabitmap_node)
+ what = UDF_C_METADATA_SBM;
+
+ return (what);
+}
+
+
+int
+udf_get_record_vpart(struct udf_mount *ump, int udf_c_type)
+{
+ int vpart_num;
+
+ vpart_num = ump->data_part;
+ if (udf_c_type == UDF_C_NODE)
+ vpart_num = ump->node_part;
+ if (udf_c_type == UDF_C_FIDS)
+ vpart_num = ump->fids_part;
+
+ return (vpart_num);
+}
+#endif
+
+/*
+ * BUGALERT: some rogue implementations use random physical partition
+ * numbers to break other implementations so lookup the number.
+ */
+
+static uint16_t
+udf_find_raw_phys(struct udf_mount *ump, uint16_t raw_phys_part)
+{
+ struct part_desc *part;
+ uint16_t phys_part;
+
+ for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
+ part = ump->partitions[phys_part];
+ if (part == NULL)
+ break;
+ if (le16toh(part->part_num) == raw_phys_part)
+ break;
+ }
+ return (phys_part);
+}
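+
+/*
+ * Illustrative example: if a partition descriptor read from disc claims
+ * partition number 4660 but it is the first partition recorded in
+ * ump->partitions[], the lookup above still maps it to physical slot 0, so
+ * the rest of the code can index by slot instead of trusting the raw number.
+ */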
+
+/* --------------------------------------------------------------------- */
+
+/* we don't try to be smart; we just record the parts */
+#define UDF_UPDATE_DSCR(name, dscr) do {	\
+	if (name)				\
+		free(name, M_UDFTEMP);		\
+	(name) = (dscr);			\
+} while (0)
+
+static int
+udf_process_vds_descriptor(struct udf_mount *ump, union dscrptr *dscr)
+{
+ uint16_t phys_part, raw_phys_part;
+
+ DPRINTF(VOLUMES, ("\tprocessing VDS descr %d\n",
+ le16toh(dscr->tag.id)));
+ switch (le16toh(dscr->tag.id)) {
+ case TAGID_PRI_VOL : /* primary partition */
+ UDF_UPDATE_DSCR(ump->primary_vol, &dscr->pvd);
+ break;
+ case TAGID_LOGVOL : /* logical volume */
+ UDF_UPDATE_DSCR(ump->logical_vol, &dscr->lvd);
+ break;
+ case TAGID_UNALLOC_SPACE : /* unallocated space */
+ UDF_UPDATE_DSCR(ump->unallocated, &dscr->usd);
+ break;
+ case TAGID_IMP_VOL : /* implementation */
+ /* XXX do we care about multiple impl. descr ? */
+ UDF_UPDATE_DSCR(ump->implementation, &dscr->ivd);
+ break;
+ case TAGID_PARTITION : /* physical partition */
+		/* not much use if it's not allocated */
+ if ((le16toh(dscr->pd.flags) & UDF_PART_FLAG_ALLOCATED) == 0) {
+ free(dscr, M_UDFTEMP);
+ break;
+ }
+
+ /*
+ * BUGALERT: some rogue implementations use random physical
+ * partition numbers to break other implementations so lookup
+ * the number.
+ */
+ raw_phys_part = le16toh(dscr->pd.part_num);
+ phys_part = udf_find_raw_phys(ump, raw_phys_part);
+
+ if (phys_part == UDF_PARTITIONS) {
+ free(dscr, M_UDFTEMP);
+ return (EINVAL);
+ }
+
+ UDF_UPDATE_DSCR(ump->partitions[phys_part], &dscr->pd);
+ break;
+ case TAGID_VOL : /* volume space extender; rare */
+ DPRINTF(VOLUMES, ("VDS extender ignored\n"));
+ free(dscr, M_UDFTEMP);
+ break;
+ default :
+ DPRINTF(VOLUMES, ("Unhandled VDS type %d\n",
+ le16toh(dscr->tag.id)));
+ free(dscr, M_UDFTEMP);
+ }
+
+ return (0);
+}
+#undef UDF_UPDATE_DSCR
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_read_vds_extent(struct udf_mount *ump, uint32_t loc, uint32_t len)
+{
+ union dscrptr *dscr;
+ uint32_t sector_size, dscr_size;
+ int error;
+
+ sector_size = ump->discinfo.sector_size;
+
+ /* loc is sectornr, len is in bytes */
+ error = EIO;
+ while (len) {
+ error = udf_read_phys_dscr(ump, loc, M_UDFTEMP, &dscr);
+ if (error) {
+			if (dscr != NULL)
+				free(dscr, M_UDFTEMP);
+ return (error);
+ }
+
+ /* blank block is a terminator */
+ if (dscr == NULL)
+ return (0);
+
+ /* TERM descriptor is a terminator */
+ if (le16toh(dscr->tag.id) == TAGID_TERM) {
+ free(dscr, M_UDFTEMP);
+ return (0);
+ }
+
+ /* process all others */
+ dscr_size = udf_tagsize(dscr, sector_size);
+
+ /* dscr is assigned into ump */
+ error = udf_process_vds_descriptor(ump, dscr);
+ if (error) {
+ free(dscr, M_UDFTEMP);
+ break;
+ }
+		KASSERT(!(dscr_size % sector_size), ("udf_read_vds_extent: "
+		    "wrong descriptor size"));
+ len -= dscr_size;
+ loc += dscr_size / sector_size;
+ }
+
+ return (error);
+}
+
+
+int
+udf_read_vds_space(struct udf_mount *ump)
+{
+ /* struct udf_args *args = &ump->mount_args; */
+ struct anchor_vdp *anchor, *anchor2;
+ size_t size;
+ int error;
+ uint32_t main_loc, main_len, reserve_loc, reserve_len;
+
+ /*
+ * read in VDS space provided by the anchors; if one descriptor read
+ * fails, try the mirror sector.
+ *
+ * check if 2nd anchor is different from 1st; if so, go for 2nd. This
+ * avoids the `compatibility features' of DirectCD that may confuse
+ * stuff completely.
+ */
+
+ anchor = ump->anchors[0];
+ anchor2 = ump->anchors[1];
+ KASSERT(anchor, ("udf_read_vds_space: anchor is not valid"));
+
+ if (anchor2) {
+ size = sizeof(struct extent_ad);
+ if (memcmp(&anchor->main_vds_ex, &anchor2->main_vds_ex, size))
+ anchor = anchor2;
+ /* reserve is specified to be a literal copy of main */
+ }
+
+ main_loc = le32toh(anchor->main_vds_ex.loc);
+ main_len = le32toh(anchor->main_vds_ex.len);
+
+ reserve_loc = le32toh(anchor->reserve_vds_ex.loc);
+ reserve_len = le32toh(anchor->reserve_vds_ex.len);
+
+ error = udf_read_vds_extent(ump, main_loc, main_len);
+ if (error) {
+ printf("UDF mount: reading in reserve VDS extent\n");
+ error = udf_read_vds_extent(ump, reserve_loc, reserve_len);
+ }
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Read in the logical volume integrity sequence pointed to by our logical
+ * volume descriptor. It's a sequence that can be extended using fields in the
+ * integrity descriptor itself. On sequential media only one is found, on
+ * rewritable media a sequence of descriptors can be found as a form of
+ * history keeping and on non-sequential write-once media the chain is vital
+ * to allow more and more descriptors to be written. The last descriptor
+ * written in an extent needs to claim space for a new extent.
+ */
+
+static int
+udf_retrieve_lvint(struct udf_mount *ump)
+{
+ union dscrptr *dscr;
+ struct logvol_int_desc *lvint;
+ struct udf_lvintq *trace;
+ int dscr_type, error, trace_len;
+ uint32_t lb_size, lbnum, len;
+
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ len = le32toh(ump->logical_vol->integrity_seq_loc.len);
+ lbnum = le32toh(ump->logical_vol->integrity_seq_loc.loc);
+
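+ /*
+ * The lvint_trace[] array records, per extent of the integrity sequence,
+ * its location and the position at which a new descriptor may be appended
+ * (wpos); udf_writeout_lvint() consults it when writing a new descriptor.
+ */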
+ /* clean trace */
+ memset(ump->lvint_trace, 0,
+ UDF_LVDINT_SEGMENTS * sizeof(struct udf_lvintq));
+
+ trace_len = 0;
+ trace = ump->lvint_trace;
+ trace->start = lbnum;
+ trace->end = lbnum + len/lb_size;
+ trace->pos = 0;
+ trace->wpos = 0;
+
+ lvint = NULL;
+ dscr = NULL;
+ error = 0;
+ while (len) {
+ trace->pos = lbnum - trace->start;
+ trace->wpos = trace->pos + 1;
+
+ /* read in our integrity descriptor */
+ error = udf_read_phys_dscr(ump, lbnum, M_UDFTEMP, &dscr);
+ if (!error) {
+ if (dscr == NULL) {
+ trace->wpos = trace->pos;
+ break; /* empty terminates */
+ }
+ dscr_type = le16toh(dscr->tag.id);
+ if (dscr_type == TAGID_TERM) {
+ trace->wpos = trace->pos;
+ break; /* clean terminator */
+ }
+ if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
+ /* fatal... corrupt disc */
+ error = ENOENT;
+ break;
+ }
+ if (lvint)
+ free(lvint, M_UDFTEMP);
+ lvint = &dscr->lvid;
+ dscr = NULL;
+ } /* else hope for the best... maybe the next is ok */
+
+ DPRINTFIF(VOLUMES, lvint, ("logvol integrity read, state %s\n",
+ le32toh(lvint->integrity_type) ? "CLOSED" : "OPEN"));
+
+ /* proceed sequential */
+ lbnum += 1;
+ len -= lb_size;
+
+ /* are we linking to a new piece? */
+ if (dscr && lvint->next_extent.len) {
+ len = le32toh(lvint->next_extent.len);
+ lbnum = le32toh(lvint->next_extent.loc);
+
+ if (trace_len >= UDF_LVDINT_SEGMENTS-1) {
+ /* IEK! segment link full... */
+ DPRINTF(VOLUMES, ("lvdint segments full\n"));
+ error = EINVAL;
+ } else {
+ trace++;
+ trace_len++;
+
+ trace->start = lbnum;
+ trace->end = lbnum + len/lb_size;
+ trace->pos = 0;
+ trace->wpos = 0;
+ }
+ }
+ }
+
+ /* clean up the mess, esp. when there is an error */
+ if (dscr)
+ free(dscr, M_UDFTEMP);
+
+ if (error && lvint) {
+ free(lvint, M_UDFTEMP);
+ lvint = NULL;
+ }
+
+ if (!lvint)
+ error = ENOENT;
+
+ ump->logvol_integrity = lvint;
+ return (error);
+}
+
+#if 0
+static int
+udf_loose_lvint_history(struct udf_mount *ump)
+{
+ union dscrptr **bufs, *dscr, *last_dscr;
+ struct udf_lvintq *trace, *in_trace, *out_trace;
+ struct logvol_int_desc *lvint;
+ uint32_t in_ext, in_pos, in_len;
+ uint32_t out_ext, out_wpos, out_len;
+ uint32_t lb_size, packet_size, lb_num;
+ uint32_t len, start;
+ int ext, minext, extlen, cnt, cpy_len, dscr_type;
+ int losing;
+ int error;
+
+ DPRINTF(VOLUMES, ("need to lose some lvint history\n"));
+
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ packet_size = ump->data_track.packet_size; /* XXX data track */
+
+ /* search smallest extent */
+ trace = &ump->lvint_trace[0];
+ minext = trace->end - trace->start;
+ for (ext = 1; ext < UDF_LVDINT_SEGMENTS; ext++) {
+ trace = &ump->lvint_trace[ext];
+ extlen = trace->end - trace->start;
+ if (extlen == 0)
+ break;
+ minext = MIN(minext, extlen);
+ }
+ losing = MIN(minext, UDF_LVINT_LOSSAGE);
+ /* no sense wiping all */
+ if (losing == minext)
+ losing--;
+
+ DPRINTF(VOLUMES, ("\tlosing %d entries\n", losing));
+
+ /* get buffer for pieces */
+ bufs = malloc(UDF_LVDINT_SEGMENTS * sizeof(void *), M_UDFTEMP, M_WAITOK);
+
+ in_ext = 0;
+ in_pos = losing;
+ in_trace = &ump->lvint_trace[in_ext];
+ in_len = in_trace->end - in_trace->start;
+ out_ext = 0;
+ out_wpos = 0;
+ out_trace = &ump->lvint_trace[out_ext];
+ out_len = out_trace->end - out_trace->start;
+
+ last_dscr = NULL;
+ for(;;) {
+ out_trace->pos = out_wpos;
+ out_trace->wpos = out_trace->pos;
+ if (in_pos >= in_len) {
+ in_ext++;
+ in_pos = 0;
+ in_trace = &ump->lvint_trace[in_ext];
+ in_len = in_trace->end - in_trace->start;
+ }
+ if (out_wpos >= out_len) {
+ out_ext++;
+ out_wpos = 0;
+ out_trace = &ump->lvint_trace[out_ext];
+ out_len = out_trace->end - out_trace->start;
+ }
+ /* copy overlap contents */
+ cpy_len = MIN(in_len - in_pos, out_len - out_wpos);
+ cpy_len = MIN(cpy_len, in_len - in_trace->pos);
+ if (cpy_len == 0)
+ break;
+
+ /* copy */
+ DPRINTF(VOLUMES, ("\treading %d lvid descriptors\n", cpy_len));
+ for (cnt = 0; cnt < cpy_len; cnt++) {
+ /* read in our integrity descriptor */
+ lb_num = in_trace->start + in_pos + cnt;
+ error = udf_read_phys_dscr(ump, lb_num, M_UDFTEMP,
+ &dscr);
+ if (error) {
+ /* copy last one */
+ dscr = last_dscr;
+ }
+ bufs[cnt] = dscr;
+ if (!error) {
+ if (dscr == NULL) {
+ out_trace->pos = out_wpos + cnt;
+ out_trace->wpos = out_trace->pos;
+ break; /* empty terminates */
+ }
+ dscr_type = le16toh(dscr->tag.id);
+ if (dscr_type == TAGID_TERM) {
+ out_trace->pos = out_wpos + cnt;
+ out_trace->wpos = out_trace->pos;
+ break; /* clean terminator */
+ }
+ if (dscr_type != TAGID_LOGVOL_INTEGRITY) {
+ panic( "UDF integrity sequence "
+ "corrupted while mounted!\n");
+ }
+ last_dscr = dscr;
+ }
+ }
+
+ /* patch up if first entry was on error */
+ if (bufs[0] == NULL) {
+ for (cnt = 0; cnt < cpy_len; cnt++)
+ if (bufs[cnt] != NULL)
+ break;
+ last_dscr = bufs[cnt];
+ for (; cnt > 0; cnt--) {
+ bufs[cnt] = last_dscr;
+ }
+ }
+
+ /* glue + write out */
+ DPRINTF(VOLUMES, ("\twriting %d lvid descriptors\n", cpy_len));
+ for (cnt = 0; cnt < cpy_len; cnt++) {
+ lb_num = out_trace->start + out_wpos + cnt;
+ lvint = &bufs[cnt]->lvid;
+
+ /* set continuation */
+ len = 0;
+ start = 0;
+ if (out_wpos + cnt == out_len) {
+ /* get continuation */
+ trace = &ump->lvint_trace[out_ext+1];
+ len = trace->end - trace->start;
+ start = trace->start;
+ }
+ lvint->next_extent.len = htole32(len);
+ lvint->next_extent.loc = htole32(start);
+
+ lb_num = trace->start + trace->wpos;
+ error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
+ bufs[cnt], lb_num, lb_num);
+ DPRINTFIF(VOLUMES, error,
+ ("error writing lvint lb_num\n"));
+ }
+
+ /* free non repeating descriptors */
+ last_dscr = NULL;
+ for (cnt = 0; cnt < cpy_len; cnt++) {
+ if (bufs[cnt] != last_dscr)
+ free(bufs[cnt], M_UDFTEMP);
+ last_dscr = bufs[cnt];
+ }
+
+ /* advance */
+ in_pos += cpy_len;
+ out_wpos += cpy_len;
+ }
+
+ free(bufs, M_UDFTEMP);
+
+ return (0);
+}
+
+
+static int
+udf_writeout_lvint(struct udf_mount *ump, int lvflag)
+{
+ struct udf_lvintq *trace;
+ struct timeval now_v;
+ struct timespec now_s;
+ uint32_t sector;
+ int logvol_integrity;
+ int space, error;
+
+ DPRINTF(VOLUMES, ("writing out logvol integrity descriptor\n"));
+
+again:
+ /* get free space in last chunk */
+ trace = ump->lvint_trace;
+ while (trace->wpos > (trace->end - trace->start)) {
+ DPRINTF(VOLUMES, ("skip : start = %d, end = %d, pos = %d, "
+ "wpos = %d\n", trace->start, trace->end,
+ trace->pos, trace->wpos));
+ trace++;
+ }
+
+ /* check if there is space to append */
+ space = (trace->end - trace->start) - trace->wpos;
+ DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
+ "space = %d\n", trace->start, trace->end, trace->pos,
+ trace->wpos, space));
+
+ /* get state */
+ logvol_integrity = le32toh(ump->logvol_integrity->integrity_type);
+ if (logvol_integrity == UDF_INTEGRITY_CLOSED) {
+ if ((space < 3) && (lvflag & UDF_APPENDONLY_LVINT)) {
+ /* don't allow this logvol to be opened */
+ /* TODO extend LVINT space if possible */
+ return (EROFS);
+ }
+ }
+
+ if (space < 1) {
+ if (lvflag & UDF_APPENDONLY_LVINT)
+ return (EROFS);
+ /* lose history by rewriting extents */
+ error = udf_loose_lvint_history(ump);
+ if (error)
+ return (error);
+ goto again;
+ }
+
+ /* update our integrity descriptor to identify us and timestamp it */
+ DPRINTF(VOLUMES, ("updating integrity descriptor\n"));
+ microtime(&now_v);
+ TIMEVAL_TO_TIMESPEC(&now_v, &now_s);
+ udf_timespec_to_timestamp(&now_s, &ump->logvol_integrity->time);
+ udf_set_regid(&ump->logvol_info->impl_id, IMPL_NAME);
+ udf_add_impl_regid(ump, &ump->logvol_info->impl_id);
+
+ /* writeout integrity descriptor */
+ sector = trace->start + trace->wpos;
+ error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
+ (union dscrptr *) ump->logvol_integrity,
+ sector, sector);
+ DPRINTF(VOLUMES, ("writeout lvint : error = %d\n", error));
+ if (error)
+ return (error);
+
+ /* advance write position */
+ trace->wpos++; space--;
+ if (space >= 1) {
+ /* append terminator */
+ sector = trace->start + trace->wpos;
+ error = udf_write_terminator(ump, sector);
+
+ DPRINTF(VOLUMES, ("write terminator : error = %d\n", error));
+ }
+
+ space = (trace->end - trace->start) - trace->wpos;
+ DPRINTF(VOLUMES, ("write start = %d, end = %d, pos = %d, wpos = %d, "
+ "space = %d\n", trace->start, trace->end, trace->pos,
+ trace->wpos, space));
+ DPRINTF(VOLUMES, ("finished writing out logvol integrity descriptor "
+ "successfull\n"));
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_read_physical_partition_spacetables(struct udf_mount *ump)
+{
+ union dscrptr *dscr;
+ /* struct udf_args *args = &ump->mount_args; */
+ struct part_desc *partd;
+ struct part_hdr_desc *parthdr;
+ struct udf_bitmap *bitmap;
+ uint32_t phys_part;
+ uint32_t lb_num, len;
+ int error, dscr_type;
+
+ /* unallocated space map */
+ for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
+ partd = ump->partitions[phys_part];
+ if (partd == NULL)
+ continue;
+ parthdr = &partd->_impl_use.part_hdr;
+
+ lb_num = le32toh(partd->start_loc);
+ lb_num += le32toh(parthdr->unalloc_space_bitmap.lb_num);
+ len = le32toh(parthdr->unalloc_space_bitmap.len);
+ if (len == 0)
+ continue;
+
+ DPRINTF(VOLUMES, ("Read unalloc. space bitmap %d\n", lb_num));
+ error = udf_read_phys_dscr(ump, lb_num, M_UDFTEMP, &dscr);
+ if (!error && dscr) {
+ /* analyse */
+ dscr_type = le16toh(dscr->tag.id);
+ if (dscr_type == TAGID_SPACE_BITMAP) {
+ DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
+ ump->part_unalloc_dscr[phys_part] = &dscr->sbd;
+
+ /* fill in ump->part_unalloc_bits */
+ bitmap = &ump->part_unalloc_bits[phys_part];
+ bitmap->blob = (uint8_t *) dscr;
+ bitmap->bits = dscr->sbd.data;
+ bitmap->max_offset = le32toh(dscr->sbd.num_bits);
+ bitmap->pages = NULL; /* TODO */
+ bitmap->data_pos = 0;
+ bitmap->metadata_pos = 0;
+ } else {
+ free(dscr, M_UDFTEMP);
+
+ printf( "UDF mount: error reading unallocated "
+ "space bitmap\n");
+ return (EROFS);
+ }
+ } else {
+ /* blank not allowed */
+ printf("UDF mount: blank unallocated space bitmap\n");
+ return (EROFS);
+ }
+ }
+
+ /* unallocated space table (not supported) */
+ for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
+ partd = ump->partitions[phys_part];
+ if (partd == NULL)
+ continue;
+ parthdr = &partd->_impl_use.part_hdr;
+
+ len = le32toh(parthdr->unalloc_space_table.len);
+ if (len) {
+ printf("UDF mount: space tables not supported\n");
+ return (EROFS);
+ }
+ }
+
+ /* freed space map */
+ for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
+ partd = ump->partitions[phys_part];
+ if (partd == NULL)
+ continue;
+ parthdr = &partd->_impl_use.part_hdr;
+
+ /* freed space map */
+ lb_num = le32toh(partd->start_loc);
+ lb_num += le32toh(parthdr->freed_space_bitmap.lb_num);
+ len = le32toh(parthdr->freed_space_bitmap.len);
+ if (len == 0)
+ continue;
+
+ DPRINTF(VOLUMES, ("Read unalloc. space bitmap %d\n", lb_num));
+ error = udf_read_phys_dscr(ump, lb_num, M_UDFTEMP, &dscr);
+ if (!error && dscr) {
+ /* analyse */
+ dscr_type = le16toh(dscr->tag.id);
+ if (dscr_type == TAGID_SPACE_BITMAP) {
+ DPRINTF(VOLUMES, ("Accepting space bitmap\n"));
+ ump->part_freed_dscr[phys_part] = &dscr->sbd;
+
+ /* fill in ump->part_freed_bits */
+ bitmap = &ump->part_unalloc_bits[phys_part];
+ bitmap->blob = (uint8_t *) dscr;
+ bitmap->bits = dscr->sbd.data;
+ bitmap->max_offset = le32toh(dscr->sbd.num_bits);
+ bitmap->pages = NULL; /* TODO */
+ bitmap->data_pos = 0;
+ bitmap->metadata_pos = 0;
+ } else {
+ free(dscr, M_UDFTEMP);
+
+ printf( "UDF mount: error reading freed "
+ "space bitmap\n");
+ return (EROFS);
+ }
+ } else {
+ /* blank not allowed */
+ printf("UDF mount: blank freed space bitmap\n");
+ return (EROFS);
+ }
+ }
+
+ /* freed space table (not supported) */
+ for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
+ partd = ump->partitions[phys_part];
+ if (partd == NULL)
+ continue;
+ parthdr = &partd->_impl_use.part_hdr;
+
+ len = le32toh(parthdr->freed_space_table.len);
+ if (len) {
+ printf("UDF mount: space tables not supported\n");
+ return (EROFS);
+ }
+ }
+
+ return (0);
+}
+
+
+/* TODO implement async writeout */
+int
+udf_write_physical_partition_spacetables(struct udf_mount *ump, int waitfor)
+{
+ union dscrptr *dscr;
+ /* struct udf_args *args = &ump->mount_args; */
+ struct part_desc *partd;
+ struct part_hdr_desc *parthdr;
+ uint32_t phys_part;
+ uint32_t lb_num, len, ptov;
+ int error_all, error;
+
+ error_all = 0;
+ /* unallocated space map */
+ for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
+ partd = ump->partitions[phys_part];
+ if (partd == NULL)
+ continue;
+ parthdr = &partd->_impl_use.part_hdr;
+
+ ptov = le32toh(partd->start_loc);
+ lb_num = le32toh(parthdr->unalloc_space_bitmap.lb_num);
+ len = le32toh(parthdr->unalloc_space_bitmap.len);
+ if (len == 0)
+ continue;
+
+ DPRINTF(VOLUMES, ("Write unalloc. space bitmap %d\n",
+ lb_num + ptov));
+ dscr = (union dscrptr *) ump->part_unalloc_dscr[phys_part];
+ error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
+ (union dscrptr *) dscr,
+ ptov + lb_num, lb_num);
+ if (error) {
+ DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
+ error_all = error;
+ }
+ }
+
+ /* freed space map */
+ for (phys_part = 0; phys_part < UDF_PARTITIONS; phys_part++) {
+ partd = ump->partitions[phys_part];
+ if (partd == NULL)
+ continue;
+ parthdr = &partd->_impl_use.part_hdr;
+
+ /* freed space map */
+ ptov = le32toh(partd->start_loc);
+ lb_num = le32toh(parthdr->freed_space_bitmap.lb_num);
+ len = le32toh(parthdr->freed_space_bitmap.len);
+ if (len == 0)
+ continue;
+
+ DPRINTF(VOLUMES, ("Write freed space bitmap %d\n",
+ lb_num + ptov));
+ dscr = (union dscrptr *) ump->part_freed_dscr[phys_part];
+ error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
+ (union dscrptr *) dscr,
+ ptov + lb_num, lb_num);
+ if (error) {
+ DPRINTF(VOLUMES, ("\tfailed!! (error %d)\n", error));
+ error_all = error;
+ }
+ }
+
+ return (error_all);
+}
+#endif
+
+#if 0
+static int
+udf_read_metadata_partition_spacetable(struct udf_mount *ump)
+{
+ struct udf_node *bitmap_node;
+ union dscrptr *dscr;
+ struct udf_bitmap *bitmap;
+ uint64_t inflen;
+ int error, dscr_type;
+
+ bitmap_node = ump->metadatabitmap_node;
+
+ /* only read in when metadata bitmap node is read in */
+ if (bitmap_node == NULL)
+ return (0);
+
+ if (bitmap_node->fe) {
+ inflen = le64toh(bitmap_node->fe->inf_len);
+ } else {
+ KASSERT(bitmap_node->efe, "error occured here");
+ inflen = le64toh(bitmap_node->efe->inf_len);
+ }
+
+ DPRINTF(VOLUMES, ("Reading metadata space bitmap for "
+ "%"PRIu64" bytes\n", inflen));
+
+ /* allocate space for bitmap */
+ dscr = malloc(inflen, M_UDFTEMP, M_WAITOK); /*M_CANFAIL was removed from third param*/
+ if (!dscr)
+ return (ENOMEM);
+
+ /* set vnode type to regular file or we can't read from it! */
+ bitmap_node->vnode->v_type = VREG;
+
+ /* read in complete metadata bitmap file */
+ error = udf_read_node(bitmap_node, (uint8_t *)dscr, 0, inflen);
+ /*error = vn_rdwr(UIO_READ, bitmap_node->vnode,
+ dscr,
+ inflen, 0,
+ UIO_SYSSPACE,
+ IO_SYNC | IO_NODELOCKED | IO_ALTSEMANTICS, FSCRED,
+ NULL, NULL);*/
+ if (error) {
+ DPRINTF(VOLUMES, ("Error reading metadata space bitmap\n"));
+ goto errorout;
+ }
+
+ /* analyse */
+ dscr_type = le16toh(dscr->tag.id);
+ if (dscr_type == TAGID_SPACE_BITMAP) {
+ DPRINTF(VOLUMES, ("Accepting metadata space bitmap\n"));
+ ump->metadata_unalloc_dscr = &dscr->sbd;
+
+ /* fill in bitmap bits */
+ bitmap = &ump->metadata_unalloc_bits;
+ bitmap->blob = (uint8_t *) dscr;
+ bitmap->bits = dscr->sbd.data;
+ bitmap->max_offset = le32toh(dscr->sbd.num_bits);
+ bitmap->pages = NULL; /* TODO */
+ bitmap->data_pos = 0;
+ bitmap->metadata_pos = 0;
+ } else {
+ DPRINTF(VOLUMES, ("No valid bitmap found!\n"));
+ goto errorout;
+ }
+
+ return (0);
+
+errorout:
+ free(dscr, M_UDFTEMP);
+ printf( "UDF mount: error reading unallocated "
+ "space bitmap for metadata partition\n");
+ return (EROFS);
+}
+
+
+int
+udf_write_metadata_partition_spacetable(struct udf_mount *ump, int waitfor)
+{
+ struct udf_node *bitmap_node;
+ union dscrptr *dscr;
+ uint64_t inflen, new_inflen;
+ int dummy, error;
+
+ bitmap_node = ump->metadatabitmap_node;
+
+ /* only write out when metadata bitmap node is known */
+ if (bitmap_node == NULL)
+ return (0);
+
+ if (bitmap_node->fe) {
+ inflen = le64toh(bitmap_node->fe->inf_len);
+ } else {
+ KASSERT(bitmap_node->efe, ("bitmap_node has neither fe nor efe"));
+ inflen = le64toh(bitmap_node->efe->inf_len);
+ }
+
+ /* reduce length to zero */
+ dscr = (union dscrptr *) ump->metadata_unalloc_dscr;
+ new_inflen = udf_tagsize(dscr, 1);
+
+ DPRINTF(VOLUMES, ("Resize and write out metadata space bitmap from "
+ "%"PRIu64" to %"PRIu64" bytes\n", inflen, new_inflen));
+
+ error = udf_resize_node(bitmap_node, new_inflen, &dummy);
+ if (error)
+ printf("Error resizing metadata space bitmap\n");
+
+ error = vn_rdwr(UIO_WRITE, bitmap_node->vnode,
+ dscr,
+ new_inflen, 0,
+ UIO_SYSSPACE,
+ IO_NODELOCKED | IO_ALTSEMANTICS, FSCRED,
+ NULL, NULL);
+
+ bitmap_node->i_flags |= IN_MODIFIED;
+ vflushbuf(bitmap_node->vnode, 1 /* sync */);
+
+ error = VOP_FSYNC(bitmap_node->vnode,
+ FSCRED, FSYNC_WAIT, 0, 0);
+
+ if (error)
+ printf( "Error writing out metadata partition unalloced "
+ "space bitmap!\n");
+
+ return (error);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Checks if ump's vds information is correct and complete
+ */
+
+int
+udf_process_vds(struct udf_mount *ump)
+{
+ /* struct udf_args *args = &ump->mount_args; */
+ union udf_pmap *mapping;
+ struct logvol_int_desc *lvint;
+ struct udf_logvol_info *lvinfo;
+ int pmap_stype, pmap_size, pmap_type, log_part, phys_part, error;
+ int raw_phys_part, maps_on, len;
+ int n_phys, n_virt, n_spar, n_meta;
+ uint32_t n_pm, mt_l;
+ char *domain_name, *map_name; /* bits[128]; */
+ const char *check_name;
+ uint8_t *pmap_pos;
+
+ if (ump == NULL)
+ return (ENOENT);
+
+ /* we need at least an anchor (trivial, but for safety) */
+ if (ump->anchors[0] == NULL)
+ return (EINVAL);
+
+ /* we need at least one primary and one logical volume descriptor */
+ if ((ump->primary_vol == NULL) || (ump->logical_vol == NULL))
+ return (EINVAL);
+
+ /* we need at least one partition descriptor */
+ if (ump->partitions[0] == NULL)
+ return (EINVAL);
+
+/*
+Check that character set is correct?
+
+ chsp = &ump->logical_vol->desc_charset;
+ is_osta_typ0 = (chsp->type == 0);
+ is_osta_typ0 &= (strcmp((char *) chsp->inf, osta_id) == 0);
+*/
+
+ /* check logical volume sector size versus device sector size */
+ if (le32toh(ump->logical_vol->lb_size) != ump->discinfo.sector_size) {
+ printf("UDF mount: format violation, lb_size != sector size\n");
+ return (EINVAL);
+ }
+
+ /* check domain name */
+ domain_name = ump->logical_vol->domain_id.id;
+ if (strncmp(domain_name, "*OSTA UDF Compliant", 20)) {
+ printf("mount_udf: disc not OSTA UDF Compliant, aborting\n");
+ return (EINVAL);
+ }
+
+ /* retrieve logical volume integrity sequence */
+ error = udf_retrieve_lvint(ump);
+ if (error != 0)
+ return (EINVAL); /* previously it always returned this on error */
+
+ /*
+ * We need at least one logvol integrity descriptor recorded. Note
+ * that it's OK to have an open logical volume integrity here. The VAT
+ * will close/update the integrity.
+ */
+ if (ump->logvol_integrity == NULL)
+ return (EINVAL);
+
+ /* process derived structures */
+ n_pm = le32toh(ump->logical_vol->n_pm); /* num partmaps */
+ lvint = ump->logvol_integrity;
+ lvinfo = (struct udf_logvol_info *) (&lvint->tables[2 * n_pm]);
+ ump->logvol_info = lvinfo;
+
+ /* TODO check udf versions? */
+
+ /*
+ * check logvol mappings: effective virt->log partmap translation
+ * check and recording of the mapping results. Saves expensive
+ * strncmp() in tight places.
+ */
+ DPRINTF(VOLUMES, ("checking logvol mappings\n"));
+ n_pm = le32toh(ump->logical_vol->n_pm); /* num partmaps */
+ mt_l = le32toh(ump->logical_vol->mt_l); /* partmaps data length */
+ pmap_pos = ump->logical_vol->maps;
+
+ if (n_pm > UDF_PMAPS) {
+ printf("UDF mount: too many mappings\n");
+ return (EINVAL);
+ }
+
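+ /*
+ * data_part, node_part and fids_part record which logical partition is
+ * used for file data, file entry (node) descriptors and file identifier
+ * descriptors respectively; each mapping type handled below fills in the
+ * roles it provides.
+ */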
+ /* count types and set partition numbers */
+ ump->data_part = ump->node_part = ump->fids_part = 0;
+ n_phys = n_virt = n_spar = n_meta = 0;
+ for (log_part = 0; log_part < n_pm; log_part++) {
+ mapping = (union udf_pmap *) pmap_pos;
+ pmap_stype = pmap_pos[0];
+ pmap_size = pmap_pos[1];
+
+ switch (pmap_stype) {
+ case 1: /* physical mapping */
+ /* volseq = le16toh(mapping->pm1.vol_seq_num); */
+ raw_phys_part = le16toh(mapping->pm1.part_num);
+ pmap_type = UDF_VTOP_TYPE_PHYS;
+ n_phys++;
+ ump->data_part = log_part;
+ ump->node_part = log_part;
+ ump->fids_part = log_part;
+ break;
+ case 2: /* virtual/sparable/meta mapping */
+ map_name = mapping->pm2.part_id.id;
+ /* volseq = le16toh(mapping->pm2.vol_seq_num); */
+ raw_phys_part = le16toh(mapping->pm2.part_num);
+ pmap_type = UDF_VTOP_TYPE_UNKNOWN;
+ len = UDF_REGID_ID_SIZE;
+
+ check_name = "*UDF Virtual Partition";
+ if (strncmp(map_name, check_name, len) == 0) {
+ pmap_type = UDF_VTOP_TYPE_VIRT;
+ n_virt++;
+ ump->node_part = log_part;
+ break;
+ }
+ check_name = "*UDF Sparable Partition";
+ if (strncmp(map_name, check_name, len) == 0) {
+ pmap_type = UDF_VTOP_TYPE_SPARABLE;
+ n_spar++;
+ ump->data_part = log_part;
+ ump->node_part = log_part;
+ ump->fids_part = log_part;
+ break;
+ }
+ check_name = "*UDF Metadata Partition";
+ if (strncmp(map_name, check_name, len) == 0) {
+ /* printf("*UDF Metadata Partition\n"); */
+ pmap_type = UDF_VTOP_TYPE_META;
+ n_meta++;
+ ump->node_part = log_part;
+ ump->fids_part = log_part;
+ break;
+ }
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ /*
+ * BUGALERT: some rogue implementations use random physical
+ * partition numbers to break other implementations so look up
+ * the number.
+ */
+ phys_part = udf_find_raw_phys(ump, raw_phys_part);
+
+ DPRINTF(VOLUMES, ("\t%d -> %d(%d) type %d\n", log_part,
+ raw_phys_part, phys_part, pmap_type));
+
+ if (phys_part == UDF_PARTITIONS)
+ return (EINVAL);
+ if (pmap_type == UDF_VTOP_TYPE_UNKNOWN)
+ return (EINVAL);
+
+ ump->vtop [log_part] = phys_part;
+ ump->vtop_tp[log_part] = pmap_type;
+
+ pmap_pos += pmap_size;
+ }
+ /* not winning the beauty contest */
+ ump->vtop_tp[UDF_VTOP_RAWPART] = UDF_VTOP_TYPE_RAW;
+
+ /* test some basic UDF assertions/requirements */
+ if ((n_virt > 1) || (n_spar > 1) || (n_meta > 1))
+ return (EINVAL);
+
+ if (n_virt) {
+ if ((n_phys == 0) || n_spar || n_meta)
+ return (EINVAL);
+ }
+ if (n_spar + n_phys == 0)
+ return (EINVAL);
+
+ /* select allocation type for each logical partition */
+ for (log_part = 0; log_part < n_pm; log_part++) {
+ maps_on = ump->vtop[log_part];
+ switch (ump->vtop_tp[log_part]) {
+ case UDF_VTOP_TYPE_PHYS :
+ KASSERT(maps_on == log_part, ("udf_process_vds: logical "
+ "partition does not match vpartnr translation\n"));
+ ump->vtop_alloc[log_part] = UDF_ALLOC_SPACEMAP;
+ break;
+ case UDF_VTOP_TYPE_VIRT :
+ ump->vtop_alloc[log_part] = UDF_ALLOC_VAT;
+ ump->vtop_alloc[maps_on] = UDF_ALLOC_SEQUENTIAL;
+ break;
+ case UDF_VTOP_TYPE_SPARABLE :
+ KASSERT(maps_on == log_part, ("udf_process_vds: logical "
+ "partition does not match vpartnr translation\n"));
+ ump->vtop_alloc[log_part] = UDF_ALLOC_SPACEMAP;
+ break;
+ case UDF_VTOP_TYPE_META :
+ ump->vtop_alloc[log_part] = UDF_ALLOC_METABITMAP;
+ if (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE) {
+ /* special case for UDF 2.60 */
+ ump->vtop_alloc[log_part] = UDF_ALLOC_METASEQUENTIAL;
+ ump->vtop_alloc[maps_on] = UDF_ALLOC_SEQUENTIAL;
+ }
+ break;
+ default:
+ panic("bad alloction type in udf's ump->vtop\n");
+ }
+ }
+
+ /* determine logical volume open/closure actions */
+#if 0
+ if (n_virt) {
+ ump->lvopen = 0;
+ if (ump->discinfo.last_session_state == MMC_STATE_EMPTY)
+ ump->lvopen |= UDF_OPEN_SESSION ;
+ ump->lvclose = UDF_WRITE_VAT;
+ if (ump->mount_args.udfmflags & UDFMNT_CLOSESESSION)
+ ump->lvclose |= UDF_CLOSE_SESSION;
+ } else {
+ /* `normal' rewritable or non sequential media */
+ ump->lvopen = UDF_WRITE_LVINT;
+ ump->lvclose = UDF_WRITE_LVINT;
+ if ((ump->discinfo.mmc_cur & MMC_CAP_REWRITABLE) == 0)
+ ump->lvopen |= UDF_APPENDONLY_LVINT;
+ }
+#endif
+
+ /*
+ * Determine scheduler error behaviour. For virtual partitions, update
+ * the trackinfo; for sparable partitions replace a whole block on the
+ * sparable table. Always requeue.
+ */
+ ump->lvreadwrite = 0;
+ if (n_virt)
+ ump->lvreadwrite = UDF_UPDATE_TRACKINFO;
+ if (n_spar)
+ ump->lvreadwrite = UDF_REMAP_BLOCK;
+
+#if 0
+ /*
+ * Select our scheduler
+ */
+ ump->strategy = &udf_strat_rmw;
+ if (n_virt || (ump->discinfo.mmc_cur & MMC_CAP_PSEUDOOVERWRITE))
+ ump->strategy = &udf_strat_sequential;
+ if ((ump->discinfo.mmc_class == MMC_CLASS_DISC) ||
+ (ump->discinfo.mmc_class == MMC_CLASS_UNKN))
+ ump->strategy = &udf_strat_direct;
+ if (n_spar)
+ ump->strategy = &udf_strat_rmw;
+
+ ump->strategy = &udf_strat_readonly;
+
+#if 0
+ /* read-only access won't benefit from the other schedulers */
+ if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
+ ump->strategy = &udf_strat_direct;
+#endif
+ /*print results */
+ DPRINTF(VOLUMES, ("\tdata partition %d\n", ump->data_part));
+ DPRINTF(VOLUMES, ("\t\talloc scheme %d\n", ump->vtop_alloc[ump->data_part]));
+ DPRINTF(VOLUMES, ("\tnode partition %d\n", ump->node_part));
+ DPRINTF(VOLUMES, ("\t\talloc scheme %d\n", ump->vtop_alloc[ump->node_part]));
+ DPRINTF(VOLUMES, ("\tfids partition %d\n", ump->fids_part));
+ DPRINTF(VOLUMES, ("\t\talloc scheme %d\n", ump->vtop_alloc[ump->fids_part]));
+
+ snprintb(bits, sizeof(bits), UDFLOGVOL_BITS, ump->lvopen);
+ DPRINTF(VOLUMES, ("\tactions on logvol open %s\n", bits));
+ snprintb(bits, sizeof(bits), UDFLOGVOL_BITS, ump->lvclose);
+ DPRINTF(VOLUMES, ("\tactions on logvol close %s\n", bits));
+ snprintb(bits, sizeof(bits), UDFONERROR_BITS, ump->lvreadwrite);
+ DPRINTF(VOLUMES, ("\tactions on logvol errors %s\n", bits));
+
+ DPRINTF(VOLUMES, ("\tselected sheduler `%s`\n",
+ (ump->strategy == &udf_strat_direct) ? "Direct" :
+ (ump->strategy == &udf_strat_sequential) ? "Sequential" :
+ (ump->strategy == &udf_strat_rmw) ? "RMW" : "UNKNOWN!"));
+#endif
+ /* signal its OK for now */
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Update logical volume name in all structures that keep a record of it. We
+ * use memmove since each of them might be specified as a source.
+ *
+ * Note that it doesn't update the VAT structure!
+ */
+
+static void
+udf_update_logvolname(struct udf_mount *ump, char *logvol_id)
+{
+ struct logvol_desc *lvd = NULL;
+ struct fileset_desc *fsd = NULL;
+ struct udf_lv_info *lvi = NULL;
+
+ DPRINTF(VOLUMES, ("Updating logical volume name\n"));
+ lvd = ump->logical_vol;
+ fsd = ump->fileset_desc;
+ if (ump->implementation)
+ lvi = &ump->implementation->_impl_use.lv_info;
+
+ /* logvol's id might be specified as original so use memmove here */
+ memmove(lvd->logvol_id, logvol_id, 128);
+ if (fsd)
+ memmove(fsd->logvol_id, logvol_id, 128);
+ if (lvi)
+ memmove(lvi->logvol_id, logvol_id, 128);
+}
+
+/* --------------------------------------------------------------------- */
+#if 0
+void
+udf_inittag(struct udf_mount *ump, struct desc_tag *tag, int tagid,
+ uint32_t sector)
+{
+ assert(ump->logical_vol);
+
+ tag->id = htole16(tagid);
+ tag->descriptor_ver = ump->logical_vol->tag.descriptor_ver;
+ tag->cksum = 0;
+ tag->reserved = 0;
+ tag->serial_num = ump->logical_vol->tag.serial_num;
+ tag->tag_loc = htole32(sector);
+}
+
+
+uint64_t
+udf_advance_uniqueid(struct udf_mount *ump)
+{
+ uint64_t unique_id;
+
+ mutex_enter(&ump->logvol_mutex);
+ unique_id = le64toh(ump->logvol_integrity->lvint_next_unique_id);
+ if (unique_id < 0x10)
+ unique_id = 0x10;
+ ump->logvol_integrity->lvint_next_unique_id = htole64(unique_id + 1);
+ mutex_exit(&ump->logvol_mutex);
+
+ return (unique_id);
+}
+
+
+static void
+udf_adjust_filecount(struct udf_node *udf_node, int sign)
+{
+ struct udf_mount *ump = udf_node->ump;
+ uint32_t num_dirs, num_files;
+ int udf_file_type;
+
+ /* get file type */
+ if (udf_node->fe) {
+ udf_file_type = udf_node->fe->icbtag.file_type;
+ } else {
+ udf_file_type = udf_node->efe->icbtag.file_type;
+ }
+
+ /* adjust file count */
+ mutex_enter(&ump->allocate_mutex);
+ if (udf_file_type == UDF_ICB_FILETYPE_DIRECTORY) {
+ num_dirs = le32toh(ump->logvol_info->num_directories);
+ ump->logvol_info->num_directories =
+ htole32((num_dirs + sign));
+ } else {
+ num_files = le32toh(ump->logvol_info->num_files);
+ ump->logvol_info->num_files =
+ htole32((num_files + sign));
+ }
+ mutex_exit(&ump->allocate_mutex);
+}
+
+
+void
+udf_osta_charset(struct charspec *charspec)
+{
+ memset(charspec, 0, sizeof(struct charspec));
+ charspec->type = 0;
+ strcpy((char *) charspec->inf, "OSTA Compressed Unicode");
+}
+
+
+/* first call udf_set_regid and then the suffix */
+void
+udf_set_regid(struct regid *regid, char const *name)
+{
+ memset(regid, 0, sizeof(struct regid));
+ regid->flags = 0; /* not dirty and not protected */
+ strcpy((char *) regid->id, name);
+}
+
+
+void
+udf_add_domain_regid(struct udf_mount *ump, struct regid *regid)
+{
+ uint16_t *ver;
+
+ ver = (uint16_t *) regid->id_suffix;
+ *ver = ump->logvol_info->min_udf_readver;
+}
+
+
+void
+udf_add_udf_regid(struct udf_mount *ump, struct regid *regid)
+{
+ uint16_t *ver;
+
+ ver = (uint16_t *) regid->id_suffix;
+ *ver = ump->logvol_info->min_udf_readver;
+
+ regid->id_suffix[2] = 4; /* unix */
+ regid->id_suffix[3] = 8; /* NetBSD */
+}
+
+
+void
+udf_add_impl_regid(struct udf_mount *ump, struct regid *regid)
+{
+ regid->id_suffix[0] = 4; /* unix */
+ regid->id_suffix[1] = 8; /* NetBSD */
+}
+
+
+void
+udf_add_app_regid(struct udf_mount *ump, struct regid *regid)
+{
+ regid->id_suffix[0] = APP_VERSION_MAIN;
+ regid->id_suffix[1] = APP_VERSION_SUB;
+}
+
+static int
+udf_create_parentfid(struct udf_mount *ump, struct fileid_desc *fid,
+ struct long_ad *parent, uint64_t unique_id)
+{
+ /* the size of an empty FID is 38 but needs to be a multiple of 4 */
+ int fidsize = 40;
+
+ udf_inittag(ump, &fid->tag, TAGID_FID, le32toh(parent->loc.lb_num));
+ fid->file_version_num = htole16(1); /* UDF 2.3.4.1 */
+ fid->file_char = UDF_FILE_CHAR_DIR | UDF_FILE_CHAR_PAR;
+ fid->icb = *parent;
+ fid->icb.longad_uniqueid = htole32((uint32_t) unique_id);
+ fid->tag.desc_crc_len = htole16(fidsize - UDF_DESC_TAG_LENGTH);
+ (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
+
+ return (fidsize);
+}
+#endif
+/* --------------------------------------------------------------------- */
+
+/*
+ * Extended attribute support. UDF knows of 3 places for extended attributes:
+ *
+ * (a) inside the file's (e)fe in the length of the extended attribute area
+ * before the allocation descriptors/filedata
+ *
+ * (b) in a file referenced by (e)fe->ext_attr_icb and
+ *
+ * (c) in the e(fe)'s associated stream directory that can hold various
+ * sub-files. In the stream directory a few fixed named subfiles are reserved
+ * for NT/Unix ACL's and OS/2 attributes.
+ *
+ * NOTE: Extended attributes are read randomly but always written
+ * *atomically*. For ACLs this interface is probably different but not known
+ * to me yet.
+ *
+ * Order of extended attributes in a space :
+ * ECMA 167 EAs
+ * Non block aligned Implementation Use EAs
+ * Block aligned Implementation Use EAs
+ * Application Use EAs
+ */
+
+static int
+udf_impl_extattr_check(struct impl_extattr_entry *implext)
+{
+ uint16_t *spos;
+
+ if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) {
+ /* checksum valid? */
+ spos = (uint16_t *) implext->data;
+ if (le16toh(*spos) != udf_ea_cksum((uint8_t *) implext))
+ return (EINVAL);
+ }
+ return (0);
+}
+
+static void
+udf_calc_impl_extattr_checksum(struct impl_extattr_entry *implext)
+{
+ uint16_t *spos;
+
+ if (strncmp(implext->imp_id.id, "*UDF", 4) == 0) {
+ /* set checksum */
+ spos = (uint16_t *) implext->data;
+ *spos = le16toh(udf_ea_cksum((uint8_t *) implext));
+ }
+}
+
+
+int
+udf_extattr_search_intern(struct udf_node *node,
+ uint32_t sattr, char const *sattrname,
+ uint32_t *offsetp, uint32_t *lengthp)
+{
+ struct extattrhdr_desc *eahdr;
+ struct extattr_entry *attrhdr;
+ struct impl_extattr_entry *implext;
+ int error;
+ int32_t l_ea;
+ uint32_t offset, a_l, sector_size;
+ uint8_t *pos;
+
+ /* get mountpoint */
+ sector_size = node->ump->discinfo.sector_size;
+
+ /* get information from fe/efe */
+ if (node->fe) {
+ l_ea = le32toh(node->fe->l_ea);
+ eahdr = (struct extattrhdr_desc *) node->fe->data;
+ } else {
+ KASSERT(node->efe, ("Extended File Entry is null"));
+ l_ea = le32toh(node->efe->l_ea);
+ eahdr = (struct extattrhdr_desc *) node->efe->data;
+ }
+
+ /* something recorded here? */
+ if (l_ea == 0)
+ return (ENOENT);
+
+ /* check extended attribute tag; what to do if it fails? */
+ error = udf_check_tag(eahdr);
+ if (error)
+ return (EINVAL);
+ if (le16toh(eahdr->tag.id) != TAGID_EXTATTR_HDR)
+ return (EINVAL);
+ error = udf_check_tag_payload(eahdr, sizeof(struct extattrhdr_desc));
+ if (error)
+ return (EINVAL);
+
+ DPRINTF(EXTATTR, ("Found %d bytes of extended attributes\n", l_ea));
+
+ /* looking for Ecma-167 attributes? */
+ offset = sizeof(struct extattrhdr_desc);
+
+ /* looking for either implementation use or application use */
+ if (sattr == 2048) { /* [4/48.10.8] */
+ offset = le32toh(eahdr->impl_attr_loc);
+ if (offset == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
+ return (ENOENT);
+ }
+ if (sattr == 65536) { /* [4/48.10.9] */
+ offset = le32toh(eahdr->appl_attr_loc);
+ if (offset == UDF_APPL_ATTR_LOC_NOT_PRESENT)
+ return (ENOENT);
+ }
+
+ /* paranoia check offset and l_ea */
+ if (l_ea + offset >= sector_size - sizeof(struct extattr_entry))
+ return (EINVAL);
+
+ DPRINTF(EXTATTR, ("Starting at offset %d\n", offset));
+
+ /* find our extended attribute */
+ l_ea -= offset;
+ pos = (uint8_t *) eahdr + offset;
+
+ while (l_ea >= sizeof(struct extattr_entry)) {
+ DPRINTF(EXTATTR, ("%d extended attr bytes left\n", l_ea));
+ attrhdr = (struct extattr_entry *) pos;
+ implext = (struct impl_extattr_entry *) pos;
+
+ /* get complete attribute length and check for rogue values */
+ a_l = le32toh(attrhdr->a_l);
+ DPRINTF(EXTATTR, ("attribute %d:%d, len %d/%d\n",
+ le32toh(attrhdr->type),
+ attrhdr->subtype, a_l, l_ea));
+ if ((a_l == 0) || (a_l > l_ea))
+ return (EINVAL);
+
+ if (attrhdr->type != sattr)
+ goto next_attribute;
+
+ /* we might have found it! */
+ if (attrhdr->type < 2048) { /* Ecma-167 attribute */
+ *offsetp = offset;
+ *lengthp = a_l;
+ return (0); /* success */
+ }
+
+ /*
+ * Implementation use and application use extended attributes
+ * have a name to identify them. They share the same structure; only
+ * UDF implementation use extended attributes have a checksum
+ * we need to check.
+ */
+
+ DPRINTF(EXTATTR, ("named attribute %s\n", implext->imp_id.id));
+ if (strcmp(implext->imp_id.id, sattrname) == 0) {
+ /* we have found our appl/implementation attribute */
+ *offsetp = offset;
+ *lengthp = a_l;
+ return (0); /* success */
+ }
+
+next_attribute:
+ /* next attribute */
+ pos += a_l;
+ l_ea -= a_l;
+ offset += a_l;
+ }
+ /* not found */
+ return (ENOENT);
+}
+
+
+#if 0
+static void
+udf_extattr_insert_internal(struct udf_mount *ump, union dscrptr *dscr,
+ struct extattr_entry *extattr)
+{
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct extattrhdr_desc *extattrhdr;
+ struct impl_extattr_entry *implext;
+ uint32_t impl_attr_loc, appl_attr_loc, l_ea, a_l, exthdr_len;
+ uint32_t *l_eap, l_ad;
+ uint16_t *spos;
+ uint8_t *bpos, *data;
+
+ if (le16toh(dscr->tag.id) == TAGID_FENTRY) {
+ fe = &dscr->fe;
+ data = fe->data;
+ l_eap = &fe->l_ea;
+ l_ad = le32toh(fe->l_ad);
+ } else if (le16toh(dscr->tag.id) == TAGID_EXTFENTRY) {
+ efe = &dscr->efe;
+ data = efe->data;
+ l_eap = &efe->l_ea;
+ l_ad = le32toh(efe->l_ad);
+ } else {
+ panic("Bad tag passed to udf_extattr_insert_internal");
+ }
+
+ /* can't append already written to file descriptors yet */
+ assert(l_ad == 0);
+
+ /* should have a header! */
+ extattrhdr = (struct extattrhdr_desc *) data;
+ l_ea = le32toh(*l_eap);
+ if (l_ea == 0) {
+ /* create empty extended attribute header */
+ exthdr_len = sizeof(struct extattrhdr_desc);
+
+ udf_inittag(ump, &extattrhdr->tag, TAGID_EXTATTR_HDR,
+ /* loc */ 0);
+ extattrhdr->impl_attr_loc = htole32(exthdr_len);
+ extattrhdr->appl_attr_loc = htole32(exthdr_len);
+ extattrhdr->tag.desc_crc_len = htole16(8);
+
+ /* record extended attribute header length */
+ l_ea = exthdr_len;
+ *l_eap = htole32(l_ea);
+ }
+
+ /* extract locations */
+ impl_attr_loc = le32toh(extattrhdr->impl_attr_loc);
+ appl_attr_loc = le32toh(extattrhdr->appl_attr_loc);
+ if (impl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
+ impl_attr_loc = l_ea;
+ if (appl_attr_loc == UDF_IMPL_ATTR_LOC_NOT_PRESENT)
+ appl_attr_loc = l_ea;
+
+ /* Ecma 167 EAs */
+ if (le32toh(extattr->type) < 2048) {
+ assert(impl_attr_loc == l_ea);
+ assert(appl_attr_loc == l_ea);
+ }
+
+ /* implementation use extended attributes */
+ if (le32toh(extattr->type) == 2048) {
+ assert(appl_attr_loc == l_ea);
+
+ /* calculate and write extended attribute header checksum */
+ implext = (struct impl_extattr_entry *) extattr;
+ assert(le32toh(implext->iu_l) == 4); /* [UDF 3.3.4.5] */
+ spos = (uint16_t *) implext->data;
+ *spos = htole16(udf_ea_cksum((uint8_t *) implext));
+ }
+
+ /* application use extended attributes */
+ assert(le32toh(extattr->type) != 65536);
+ assert(appl_attr_loc == l_ea);
+
+ /* append the attribute at the end of the current space */
+ bpos = data + le32toh(*l_eap);
+ a_l = le32toh(extattr->a_l);
+
+ /* update impl. attribute locations */
+ if (le32toh(extattr->type) < 2048) {
+ impl_attr_loc = l_ea + a_l;
+ appl_attr_loc = l_ea + a_l;
+ }
+ if (le32toh(extattr->type) == 2048) {
+ appl_attr_loc = l_ea + a_l;
+ }
+
+ /* copy and advance */
+ memcpy(bpos, extattr, a_l);
+ l_ea += a_l;
+ *l_eap = htole32(l_ea);
+
+ /* do the `dance` again backwards */
+ if (le16toh(ump->logical_vol->tag.descriptor_ver) != 2) {
+ if (impl_attr_loc == l_ea)
+ impl_attr_loc = UDF_IMPL_ATTR_LOC_NOT_PRESENT;
+ if (appl_attr_loc == l_ea)
+ appl_attr_loc = UDF_APPL_ATTR_LOC_NOT_PRESENT;
+ }
+
+ /* store offsets */
+ extattrhdr->impl_attr_loc = htole32(impl_attr_loc);
+ extattrhdr->appl_attr_loc = htole32(appl_attr_loc);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_update_lvid_from_vat_extattr(struct udf_node *vat_node)
+{
+ struct impl_extattr_entry *implext;
+ struct vatlvext_extattr_entry lvext;
+ struct udf_mount *ump;
+ struct udf_logvol_info *lvinfo;
+ uint64_t vat_uniqueid;
+ int error;
+ uint32_t offset, a_l;
+ const char *extstr = "*UDF VAT LVExtension";
+ uint8_t *ea_start, *lvextpos;
+
+ /* get mountpoint and lvinfo */
+ ump = vat_node->ump;
+ lvinfo = ump->logvol_info;
+
+ /* get information from fe/efe */
+ if (vat_node->fe) {
+ vat_uniqueid = le64toh(vat_node->fe->unique_id);
+ ea_start = vat_node->fe->data;
+ } else {
+ vat_uniqueid = le64toh(vat_node->efe->unique_id);
+ ea_start = vat_node->efe->data;
+ }
+
+ error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
+ if (error)
+ return (error);
+
+ implext = (struct impl_extattr_entry *) (ea_start + offset);
+ error = udf_impl_extattr_check(implext);
+ if (error)
+ return (error);
+
+ /* paranoia */
+ if (a_l != sizeof(*implext) - 1 + le32toh(implext->iu_l) + sizeof(lvext)) {
+ DPRINTF(VOLUMES, ("VAT LVExtension size doesn't compute\n"));
+ return (EINVAL);
+ }
+
+ /*
+ * we have found our "VAT LVExtension" attribute. BUT due to a
+ * bug in the specification it might not be word aligned so
+ * copy first to avoid panics on some machines (!!)
+ */
+ DPRINTF(VOLUMES, ("Found VAT LVExtension attr\n"));
+ lvextpos = implext->data + le32toh(implext->iu_l);
+ memcpy(&lvext, lvextpos, sizeof(lvext));
+
+ /* check if it was updated the last time */
+ if (le64toh(lvext.unique_id_chk) == vat_uniqueid) {
+ lvinfo->num_files = lvext.num_files;
+ lvinfo->num_directories = lvext.num_directories;
+ udf_update_logvolname(ump, lvext.logvol_id);
+ } else {
+ DPRINTF(VOLUMES, ("VAT LVExtension out of date"));
+ /* replace VAT LVExt by free space EA */
+ memset(implext->imp_id.id, 0, UDF_REGID_ID_SIZE);
+ strcpy(implext->imp_id.id, "*UDF FreeEASpace");
+ udf_calc_impl_extattr_checksum(implext);
+ }
+
+ return (0);
+}
+
+#if 0
+static int
+udf_update_vat_extattr_from_lvid(struct udf_node *vat_node)
+{
+ struct udf_mount *ump;
+ struct udf_logvol_info *lvinfo;
+ struct impl_extattr_entry *implext;
+ struct vatlvext_extattr_entry lvext;
+ const char *extstr = "*UDF VAT LVExtension";
+ uint64_t vat_uniqueid;
+ uint32_t offset, a_l;
+ uint8_t *ea_start, *lvextpos;
+ int error;
+
+ /* get mountpoint and lvinfo */
+ ump = vat_node->ump;
+ lvinfo = ump->logvol_info;
+
+ /* get information from fe/efe */
+ if (vat_node->fe) {
+ vat_uniqueid = le64toh(vat_node->fe->unique_id);
+ ea_start = vat_node->fe->data;
+ } else {
+ vat_uniqueid = le64toh(vat_node->efe->unique_id);
+ ea_start = vat_node->efe->data;
+ }
+
+ error = udf_extattr_search_intern(vat_node, 2048, extstr, &offset, &a_l);
+ if (error)
+ return (error);
+ /* found, it existed */
+
+ /* paranoia */
+ implext = (struct impl_extattr_entry *) (ea_start + offset);
+ error = udf_impl_extattr_check(implext);
+ if (error) {
+ DPRINTF(VOLUMES, ("VAT LVExtension bad on update\n"));
+ return (error);
+ }
+ /* it is correct */
+
+ /*
+ * we have found our "VAT LVExtension" attribute. BUT due to a
+ * bug in the specification it might not be word aligned so
+ * copy first to avoid panics on some machines (!!)
+ */
+ DPRINTF(VOLUMES, ("Updating VAT LVExtension attr\n"));
+ lvextpos = implext->data + le32toh(implext->iu_l);
+
+ lvext.unique_id_chk = vat_uniqueid;
+ lvext.num_files = lvinfo->num_files;
+ lvext.num_directories = lvinfo->num_directories;
+ memmove(lvext.logvol_id, ump->logical_vol->logvol_id, 128);
+
+ memcpy(lvextpos, &lvext, sizeof(lvext));
+
+ return (0);
+}
+#endif
+/* --------------------------------------------------------------------- */
+
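+/*
+ * Read a piece of the in-core copy of the VAT. The VAT is a flat table of
+ * 32-bit entries translating virtual block numbers to logical block numbers,
+ * so the valid window ends at vat_offset + vat_entries * 4.
+ */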
+int
+udf_vat_read(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
+{
+ struct udf_mount *ump = vat_node->ump;
+
+ if (offset + size > ump->vat_offset + ump->vat_entries * 4)
+ return (EINVAL);
+
+ memcpy(blob, ump->vat_table + offset, size);
+ return (0);
+}
+
+#if 0
+int
+udf_vat_write(struct udf_node *vat_node, uint8_t *blob, int size, uint32_t offset)
+{
+ struct udf_mount *ump = vat_node->ump;
+ uint32_t offset_high;
+ uint8_t *new_vat_table;
+
+ /* extend VAT allocation if needed */
+ offset_high = offset + size;
+ if (offset_high >= ump->vat_table_alloc_len) {
+ /* realloc */
+ new_vat_table = realloc(ump->vat_table,
+ ump->vat_table_alloc_len + UDF_VAT_CHUNKSIZE,
+ M_UDFTEMP, M_WAITOK | M_CANFAIL);
+ if (!new_vat_table) {
+ printf("udf_vat_write: can't extent VAT, out of mem\n");
+ return (ENOMEM);
+ }
+ ump->vat_table = new_vat_table;
+ ump->vat_table_alloc_len += UDF_VAT_CHUNKSIZE;
+ }
+ ump->vat_table_len = MAX(ump->vat_table_len, offset_high);
+
+ memcpy(ump->vat_table + offset, blob, size);
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+/* TODO support previous VAT location writeout */
+static int
+udf_update_vat_descriptor(struct udf_mount *ump)
+{
+ struct udf_node *vat_node = ump->vat_node;
+ struct udf_logvol_info *lvinfo = ump->logvol_info;
+ struct icb_tag *icbtag;
+ struct udf_oldvat_tail *oldvat_tl;
+ struct udf_vat *vat;
+ uint64_t unique_id;
+ uint32_t lb_size;
+ uint8_t *raw_vat;
+ int filetype, error;
+
+ KASSERT(vat_node);
+ KASSERT(lvinfo);
+ lb_size = le32toh(ump->logical_vol->lb_size);
+
+ /* get our new unique_id */
+ unique_id = udf_advance_uniqueid(ump);
+
+ /* get information from fe/efe */
+ if (vat_node->fe) {
+ icbtag = &vat_node->fe->icbtag;
+ vat_node->fe->unique_id = htole64(unique_id);
+ } else {
+ icbtag = &vat_node->efe->icbtag;
+ vat_node->efe->unique_id = htole64(unique_id);
+ }
+
+ /* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
+ filetype = icbtag->file_type;
+ KASSERT((filetype == 0) || (filetype == UDF_ICB_FILETYPE_VAT));
+
+ /* allocate piece to process head or tail of VAT file */
+ raw_vat = malloc(lb_size, M_UDFTEMP, M_WAITOK);
+
+ if (filetype == 0) {
+ /*
+ * Update "*UDF VAT LVExtension" extended attribute from the
+ * lvint if present.
+ */
+ udf_update_vat_extattr_from_lvid(vat_node);
+
+ /* setup identifying regid */
+ oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
+ memset(oldvat_tl, 0, sizeof(struct udf_oldvat_tail));
+
+ udf_set_regid(&oldvat_tl->id, "*UDF Virtual Alloc Tbl");
+ udf_add_udf_regid(ump, &oldvat_tl->id);
+ oldvat_tl->prev_vat = htole32(0xffffffff);
+
+ /* write out new tail of virtual allocation table file */
+ error = udf_vat_write(vat_node, raw_vat,
+ sizeof(struct udf_oldvat_tail), ump->vat_entries * 4);
+ } else {
+ /* compose the VAT2 header */
+ vat = (struct udf_vat *) raw_vat;
+ memset(vat, 0, sizeof(struct udf_vat));
+
+ vat->header_len = htole16(152); /* as per spec */
+ vat->impl_use_len = htole16(0);
+ memmove(vat->logvol_id, ump->logical_vol->logvol_id, 128);
+ vat->prev_vat = htole32(0xffffffff);
+ vat->num_files = lvinfo->num_files;
+ vat->num_directories = lvinfo->num_directories;
+ vat->min_udf_readver = lvinfo->min_udf_readver;
+ vat->min_udf_writever = lvinfo->min_udf_writever;
+ vat->max_udf_writever = lvinfo->max_udf_writever;
+
+ error = udf_vat_write(vat_node, raw_vat,
+ sizeof(struct udf_vat), 0);
+ }
+ free(raw_vat, M_UDFTEMP);
+
+ return (error); /* success! */
+}
+
+
+int
+udf_writeout_vat(struct udf_mount *ump)
+{
+ struct udf_node *vat_node = ump->vat_node;
+ uint32_t vat_length;
+ int error;
+
+ KASSERT(vat_node);
+
+ DPRINTF(CALL, ("udf_writeout_vat\n"));
+
+/* mutex_enter(&ump->allocate_mutex); */
+ udf_update_vat_descriptor(ump);
+
+ /* write out the VAT contents ; TODO intelligent writing */
+ vat_length = ump->vat_table_len;
+ error = vn_rdwr(UIO_WRITE, vat_node->vnode,
+ ump->vat_table, ump->vat_table_len, 0,
+ UIO_SYSSPACE, IO_NODELOCKED, FSCRED, NULL, NULL);
+ if (error) {
+ printf("udf_writeout_vat: failed to write out VAT contents\n");
+ goto out;
+ }
+
+/* mutex_exit(&ump->allocate_mutex); */
+
+ vflushbuf(ump->vat_node->vnode, 1 /* sync */);
+ error = VOP_FSYNC(ump->vat_node->vnode,
+ FSCRED, FSYNC_WAIT, 0, 0);
+ if (error)
+ printf("udf_writeout_vat: error writing VAT node!\n");
+out:
+
+ return (error);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Read in relevant pieces of VAT file and check if it's indeed a VAT file
+ * descriptor. If OK, read in complete VAT file.
+ */
+
+static int
+udf_check_for_vat(struct udf_node *vat_node)
+{
+ struct udf_mount *ump;
+ struct icb_tag *icbtag;
+ struct timestamp *mtime;
+ struct udf_vat *vat;
+ struct udf_oldvat_tail *oldvat_tl;
+ struct udf_logvol_info *lvinfo;
+ uint64_t unique_id;
+ int filetype, error;
+ uint32_t vat_length, vat_offset, vat_entries, vat_table_alloc_len;
+ uint32_t sector_size, *raw_vat;
+ char *regid_name;
+ uint8_t *vat_table;
+
+ /* the on-disc length is 64 bits, but a VAT that large is impossible in practice */
+
+ DPRINTF(VOLUMES, ("Checking for VAT\n"));
+ if (!vat_node)
+ return (ENOENT);
+
+ /* get mount info */
+ ump = vat_node->ump;
+ sector_size = le32toh(ump->logical_vol->lb_size);
+
+ /* check assertions */
+ KASSERT(vat_node->fe || vat_node->efe, ("udf_check_for_vat:"
+ "Extended File Entry or File Entry is null\n"));
+ KASSERT(ump->logvol_integrity, ("udf_check_for_vat: invalid current integrity"));
+
+ /* set vnode type to regular file or we can't read from it! */
+ /* vat_node->vnode->v_type = VREG; */
+
+ /* get information from fe/efe */
+ if (vat_node->fe) {
+ vat_length = le64toh(vat_node->fe->inf_len);
+ icbtag = &vat_node->fe->icbtag;
+ mtime = &vat_node->fe->mtime;
+ unique_id = le64toh(vat_node->fe->unique_id);
+ } else {
+ vat_length = le64toh(vat_node->efe->inf_len);
+ icbtag = &vat_node->efe->icbtag;
+ mtime = &vat_node->efe->mtime;
+ unique_id = le64toh(vat_node->efe->unique_id);
+ }
+
+ /* Check icb filetype! it has to be 0 or UDF_ICB_FILETYPE_VAT */
+ filetype = icbtag->file_type;
+ if ((filetype != 0) && (filetype != UDF_ICB_FILETYPE_VAT))
+ return (ENOENT);
+
+ DPRINTF(VOLUMES, ("\tPossible VAT length %d\n", vat_length));
+
+ vat_table_alloc_len =
+ ((vat_length + UDF_VAT_CHUNKSIZE-1) / UDF_VAT_CHUNKSIZE)
+ * UDF_VAT_CHUNKSIZE;
+
+ vat_table = malloc(vat_table_alloc_len, M_UDFTEMP, M_WAITOK);
+#if 0
+ /*M_CANFAIL was removed from third arg */
+ if (vat_table == NULL) {
+ printf("allocation of %d bytes failed for VAT\n",
+ vat_table_alloc_len);
+ return (ENOMEM);
+ }
+#endif
+
+ /* allocate piece to read in head or tail of VAT file */
+ raw_vat = malloc(sector_size, M_UDFTEMP, M_WAITOK);
+
+ /*
+ * check contents of the file if it's the old 1.50 VAT table format.
+ * It's notoriously broken and although some implementations support an
+ * extension as defined in the UDF 1.50 errata document, it's doubtful
+ * to be usable since a lot of implementations don't maintain it.
+ */
+ lvinfo = ump->logvol_info;
+
+ if (filetype == 0) {
+ /* 1.50 format: entries start at offset 0, with a 36-byte identifying tail at the end */
+ vat_offset = 0;
+ vat_entries = (vat_length-36)/4;
+
+ /* read in tail of virtual allocation table file */
+ error = udf_read_node(vat_node, (uint8_t *)raw_vat,
+ vat_entries * 4, sizeof(struct udf_oldvat_tail));
+ /*error = vn_rdwr(UIO_READ, vat_node->vnode,
+ (uint8_t *) raw_vat,
+ sizeof(struct udf_oldvat_tail),
+ vat_entries * 4,
+ UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
+ NULL, NULL, curthread);*/
+ if (error)
+ goto out;
+
+ /* check 1.50 VAT */
+ oldvat_tl = (struct udf_oldvat_tail *) raw_vat;
+ regid_name = (char *) oldvat_tl->id.id;
+ error = strncmp(regid_name, "*UDF Virtual Alloc Tbl", 22);
+ if (error) {
+ DPRINTF(VOLUMES, ("VAT format 1.50 rejected\n"));
+ error = ENOENT;
+ goto out;
+ }
+
+ /*
+ * update LVID from "*UDF VAT LVExtension" extended attribute
+ * if present.
+ */
+ udf_update_lvid_from_vat_extattr(vat_node);
+ } else {
+ /* read in head of virtual allocation table file */
+ error = udf_read_node(vat_node, (uint8_t *)raw_vat,
+ 0, sizeof(struct udf_vat));
+ /*error = vn_rdwr(UIO_READ, vat_node->vnode,
+ (uint8_t *) raw_vat,
+ sizeof(struct udf_vat), 0,
+ UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
+ NULL, NULL, curthread);*/
+ if (error)
+ goto out;
+
+ /* 2.xx format: VAT entries follow a header of header_len bytes */
+ vat = (struct udf_vat *) raw_vat;
+ vat_offset = le16toh(vat->header_len);
+ vat_entries = (vat_length - vat_offset)/4;
+
+ KASSERT(lvinfo, ("udf_check_for_vat: invalid integrity descriptor"));
+ lvinfo->num_files = vat->num_files;
+ lvinfo->num_directories = vat->num_directories;
+ lvinfo->min_udf_readver = vat->min_udf_readver;
+ lvinfo->min_udf_writever = vat->min_udf_writever;
+ lvinfo->max_udf_writever = vat->max_udf_writever;
+
+ udf_update_logvolname(ump, vat->logvol_id);
+ }
+
+ /* read in complete VAT file */
+ error = udf_read_node(vat_node, vat_table, 0, vat_length);
+ /*error = vn_rdwr(UIO_READ, vat_node->vnode,
+ vat_table,
+ vat_length, 0,
+ UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED, FSCRED,
+ NULL, NULL, curthread);*/
+ if (error)
+ printf("read in of complete VAT file failed (error %d)\n",
+ error);
+ if (error)
+ goto out;
+
+ DPRINTF(VOLUMES, ("VAT format accepted, marking it closed\n"));
+ ump->logvol_integrity->lvint_next_unique_id = htole64(unique_id);
+ ump->logvol_integrity->integrity_type = htole32(UDF_INTEGRITY_CLOSED);
+ ump->logvol_integrity->time = *mtime;
+
+ ump->vat_table_len = vat_length;
+ ump->vat_table_alloc_len = vat_table_alloc_len;
+ ump->vat_table = vat_table;
+ ump->vat_offset = vat_offset;
+ ump->vat_entries = vat_entries;
+ ump->vat_last_free_lb = 0; /* start at beginning */
+
+out:
+ if (error) {
+ if (vat_table)
+ free(vat_table, M_UDFTEMP);
+ }
+ free(raw_vat, M_UDFTEMP);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
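+/*
+ * Locate the VAT on virtually mapped (write-once) media. The most recently
+ * written VAT file entry is expected near the end of the recorded area, so
+ * scan backwards from the last possible VAT location until a file entry of
+ * the VAT type is found and accepted by udf_check_for_vat().
+ */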
+static int
+udf_search_vat(struct udf_mount *ump, union udf_pmap *mapping __unused)
+{
+ union dscrptr *dscr;
+ /* struct vnode *vp; */
+ struct long_ad icb_loc;
+ struct udf_node *vat_node;
+ ino_t ino;
+ int error;
+ uint32_t early_vat_loc, late_vat_loc, vat_loc;
+ uint16_t tagid;
+ uint8_t file_type;
+
+ vat_node = NULL;
+
+ /* mapping info not needed */
+ /* mapping = mapping; */ /* XXX */
+
+ vat_loc = ump->last_possible_vat_location;
+ early_vat_loc = vat_loc - 256; /* 8 blocks of 32 sectors */
+
+ DPRINTF(VOLUMES, ("1) last possible %d, early_vat_loc %d \n",
+ vat_loc, early_vat_loc));
+ early_vat_loc = MAX(early_vat_loc, ump->first_possible_vat_location);
+ late_vat_loc = vat_loc + 1024;
+
+ DPRINTF(VOLUMES, ("2) last possible %d, early_vat_loc %d \n",
+ vat_loc, early_vat_loc));
+
+ /* start looking from the end of the range */
+ do {
+ if (vat_node) {
+ udf_dispose_node(vat_node);
+ vat_node = NULL;
+ }
+
+ error = udf_read_phys_dscr(ump, vat_loc, M_UDFTEMP, &dscr);
+ if (!error && dscr) { /* dscr will be null if zeros were read */
+ tagid = le16toh(dscr->tag.id);
+ file_type = 0;
+ if (tagid == TAGID_FENTRY)
+ file_type = dscr->fe.icbtag.file_type;
+ else if (tagid == TAGID_EXTFENTRY)
+ file_type = dscr->efe.icbtag.file_type;
+ free(dscr, M_UDFTEMP);
+
+ if (file_type == 248) { /* UDF_ICB_FILETYPE_VAT */
+ DPRINTF(VOLUMES, ("Checking for VAT at sector %d\n", vat_loc));
+ icb_loc.loc.part_num = htole16(UDF_VTOP_RAWPART);
+ icb_loc.loc.lb_num = htole32(vat_loc);
+ ino = udf_get_node_id(&icb_loc);
+
+ error = udf_get_node(ump, ino, &vat_node);
+ if (!error) {
+ error = udf_check_for_vat(vat_node);
+ DPRINTFIF(VOLUMES, !error,
+ ("VAT accepted at %d\n", vat_loc));
+ if (!error)
+ break;
+ }
+ }
+ }
+
+ vat_loc--; /* walk backwards */
+ } while (vat_loc >= early_vat_loc);
+
+ /* keep our VAT node around */
+ ump->vat_node = vat_node;
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_read_sparables(struct udf_mount *ump, union udf_pmap *mapping)
+{
+ union dscrptr *dscr;
+ struct part_map_spare *pms = &mapping->pms;
+ int spar, error;
+ uint32_t lb_num;
+
+ /*
+ * The partition mapping passed on to us specifies the information we
+ * need to locate and initialise the sparable partition mapping.
+ */
+
+ ump->sparable_packet_size = le16toh(pms->packet_len);
+ KASSERT(ump->sparable_packet_size >= ump->packet_size,
+ ("udf_read_sparables: sparable packet size is less than packet size"));
+
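+ /*
+ * The mapping records up to n_st copies of the sparing table; try each
+ * recorded location until one reads back as a valid sparing table.
+ */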
+ for (spar = 0; spar < pms->n_st; spar++) {
+ lb_num = pms->st_loc[spar];
+ error = udf_read_phys_dscr(ump, lb_num, M_UDFTEMP, &dscr);
+ if (!error && dscr) {
+ if (le16toh(dscr->tag.id) == TAGID_SPARING_TABLE) {
+ if (ump->sparing_table)
+ free(ump->sparing_table, M_UDFTEMP);
+ ump->sparing_table = &dscr->spt;
+ dscr = NULL;
+ break; /* we're done */
+ }
+ }
+ if (dscr)
+ free(dscr, M_UDFTEMP);
+ }
+
+ if (ump->sparing_table)
+ return (0);
+
+ return (ENOENT);
+}
+
+/* --------------------------------------------------------------------- */
+static int
+udf_read_metadata_nodes(struct udf_mount *ump, union udf_pmap *mapping)
+{
+ struct part_map_meta *pmm = &mapping->pmm;
+ struct long_ad icb_loc;
+#if 0
+ struct vnode *vp;
+#endif
+ int error;
+ ino_t ino;
+
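+ /*
+ * A metadata partition keeps its contents in a dedicated metadata file,
+ * optionally mirrored in a copy file and accounted for in a bitmap file;
+ * read in the file entries (nodes) for each of them here.
+ */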
+ /* extract our allocation parameters set up on format */
+ ump->metadata_alloc_unit_size = le32toh(mapping->pmm.alloc_unit_size);
+ ump->metadata_alignment_unit_size = le16toh(mapping->pmm.alignment_unit_size);
+ ump->metadata_flags = mapping->pmm.flags;
+
+ /* DPRINTF(VOLUMES, ("Reading in Metadata files\n")); */
+ icb_loc.loc.part_num = pmm->part_num;
+ icb_loc.loc.lb_num = pmm->meta_file_lbn;
+ /* DPRINTF(VOLUMES, ("Metadata file\n")); */
+ ino = udf_get_node_id(&icb_loc);
+ error = udf_get_node(ump, ino, &ump->metadata_node);
+#if 0
+ if (ump->metadata_node) {
+ vp = ump->metadata_node->vnode;
+ UDF_SET_SYSTEMFILE(vp);
+ }
+#endif
+
+ icb_loc.loc.lb_num = pmm->meta_mirror_file_lbn;
+ if (icb_loc.loc.lb_num != -1) {
+ /* DPRINTF(VOLUMES, ("Metadata copy file\n")); */
+ ino = udf_get_node_id(&icb_loc);
+ error = udf_get_node(ump, ino, &ump->metadatamirror_node);
+#if 0
+ if (ump->metadatamirror_node) {
+ vp = ump->metadatamirror_node->vnode;
+ UDF_SET_SYSTEMFILE(vp);
+ }
+#endif
+ }
+
+ icb_loc.loc.lb_num = pmm->meta_bitmap_file_lbn;
+ if (icb_loc.loc.lb_num != -1) {
+ /* DPRINTF(VOLUMES, ("Metadata bitmap file\n")); */
+ ino = udf_get_node_id(&icb_loc);
+ error = udf_get_node(ump, ino, &ump->metadatabitmap_node);
+#if 0
+ if (ump->metadatabitmap_node) {
+ vp = ump->metadatabitmap_node->vnode;
+ UDF_SET_SYSTEMFILE(vp);
+ }
+#endif
+ }
+
+ /* if we're mounting read-only we relax the requirements */
+ if (ump->vfs_mountp->mnt_flag & MNT_RDONLY) {
+ error = EFAULT;
+ if (ump->metadata_node)
+ error = 0;
+ if ((ump->metadata_node == NULL) && (ump->metadatamirror_node)) {
+ printf( "udf mount: Metadata file not readable, "
+ "substituting Metadata copy file\n");
+ ump->metadata_node = ump->metadatamirror_node;
+ ump->metadatamirror_node = NULL;
+ error = 0;
+ }
+ } else {
+ /* mounting read/write */
+ /* XXX DISABLED! metadata writing is not working yet XXX */
+ if (error)
+ error = EROFS;
+ }
+ DPRINTFIF(VOLUMES, error, ("udf mount: failed to read "
+ "metadata files\n"));
+ return (error);
+}
+/* --------------------------------------------------------------------- */
+
+int
+udf_read_vds_tables(struct udf_mount *ump)
+{
+ union udf_pmap *mapping;
+ int pmap_size;
+ int error = 0;
+ uint32_t n_pm, mt_l, log_part;
+ uint8_t *pmap_pos;
+
+ /* Iterate (again) over the part mappings for locations */
+ n_pm = le32toh(ump->logical_vol->n_pm); /* num partmaps */
+ mt_l = le32toh(ump->logical_vol->mt_l); /* partmaps data length */
+ pmap_pos = ump->logical_vol->maps;
+
+ for (log_part = 0; log_part < n_pm; log_part++) {
+ mapping = (union udf_pmap *) pmap_pos;
+ switch (ump->vtop_tp[log_part]) {
+ case UDF_VTOP_TYPE_PHYS :
+ /* nothing */
+ break;
+ case UDF_VTOP_TYPE_VIRT :
+ /* search and load VAT */
+ error = udf_search_vat(ump, mapping);
+ if (error)
+ return (ENOENT);
+ break;
+ case UDF_VTOP_TYPE_SPARABLE :
+ /* load one of the sparable tables */
+ error = udf_read_sparables(ump, mapping);
+ if (error)
+ return (ENOENT);
+ break;
+ case UDF_VTOP_TYPE_META :
+ /* load the associated file descriptors */
+ error = udf_read_metadata_nodes(ump, mapping);
+ if (error)
+ return (ENOENT);
+ break;
+ default:
+ break;
+ }
+ pmap_size = pmap_pos[1];
+ pmap_pos += pmap_size;
+ }
+
+ /* read in and check unallocated and free space info if writing */
+#if 0
+ if ((ump->vfs_mountp->mnt_flag & MNT_RDONLY) == 0) {
+ error = udf_read_physical_partition_spacetables(ump);
+ if (error)
+ return (error);
+
+ /* also read in metadata partition spacebitmap if defined */
+ error = udf_read_metadata_partition_spacetable(ump);
+ return (error);
+ }
+#endif
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_read_rootdirs(struct udf_mount *ump)
+{
+ union dscrptr *dscr;
+ struct mount *mp;
+ struct vnode *rootdir_node, *streamdir_node;
+ struct long_ad fsd_loc, *dir_loc;
+ ino_t ino;
+ int dscr_type, error;
+ uint32_t lb_num, dummy, fsd_len;
+
+ mp = ump->vfs_mountp;
+
+ /* TODO implement FSD reading in separate function like integrity? */
+ /* get fileset descriptor sequence */
+ fsd_loc = ump->logical_vol->lv_fsd_loc;
+ fsd_len = le32toh(fsd_loc.len);
+
+ dscr = NULL;
+ error = 0;
+ while (fsd_len || error) {
+ DPRINTF(VOLUMES, ("fsd_len = %d\n", fsd_len));
+ /* translate fsd_loc to lb_num */
+ error = udf_translate_vtop(ump, &fsd_loc, &lb_num, &dummy);
+ if (error)
+ break;
+ error = udf_read_phys_dscr(ump, lb_num, M_UDFTEMP, &dscr);
+ /* end markers */
+ if (error || (dscr == NULL))
+ break;
+
+ /* analyse */
+ dscr_type = le16toh(dscr->tag.id);
+ if (dscr_type == TAGID_TERM)
+ break;
+ if (dscr_type != TAGID_FSD) {
+ free(dscr, M_UDFTEMP);
+ return (ENOENT);
+ }
+
+ /*
+ * TODO check for multiple fileset descriptors; only the last one
+ * is picked up now. Also check for FSD
+ * correctness/interpretability
+ */
+
+ /* update */
+ if (ump->fileset_desc) {
+ free(ump->fileset_desc, M_UDFTEMP);
+ }
+ ump->fileset_desc = &dscr->fsd;
+ dscr = NULL;
+
+ /* continue to the next fsd */
+ fsd_len -= ump->discinfo.sector_size;
+ fsd_loc.loc.lb_num = htole32(le32toh(fsd_loc.loc.lb_num)+1);
+
+ /* follow up to fsd->next_ex (long_ad) if it's not null */
+ if (le32toh(ump->fileset_desc->next_ex.len)) {
+ DPRINTF(VOLUMES, ("follow up FSD extent\n"));
+ fsd_loc = ump->fileset_desc->next_ex;
+ fsd_len = le32toh(ump->fileset_desc->next_ex.len);
+ }
+ }
+ if (dscr)
+ free(dscr, M_UDFTEMP);
+
+ /* there has to be one */
+ if (ump->fileset_desc == NULL)
+ return (ENOENT);
+
+ DPRINTF(VOLUMES, ("FSD read in fine\n"));
+ DPRINTF(VOLUMES, ("Updating fsd logical volume id\n"));
+ udf_update_logvolname(ump, ump->logical_vol->logvol_id);
+
+ /*
+ * Now the FSD is known, read in the rootdirectory and if one exists,
+ * the system stream dir. Some files in the system streamdir are not
+ * wanted in this implementation since they are not maintained. If
+ * writing is enabled we'll delete these files if they exist.
+ */
+
+ rootdir_node = streamdir_node = NULL;
+ dir_loc = NULL;
+
+ /* try to read in the rootdir */
+ dir_loc = &ump->fileset_desc->rootdir_icb;
+ ino = udf_get_node_id(dir_loc);
+ error = udf_vget(mp, ino, LK_EXCLUSIVE, &rootdir_node);
+ if (error)
+ return (ENOENT);
+
+ /* apparently it read in fine */
+
+ /*
+ * Try the system stream directory; not very likely in the ones we
+ * test, but for completeness.
+ */
+ dir_loc = &ump->fileset_desc->streamdir_icb;
+ if (le32toh(dir_loc->len)) {
+ ino = udf_get_node_id(dir_loc);
+ error = udf_vget(mp, ino, LK_EXCLUSIVE, &streamdir_node);
+ if (error) {
+ printf("udf_read_rootdirs: streamdir defined: but error in streamdir reading\n");
+ } else {
+ /*
+ * printf("udf_read_rootdirs: streamdir defined: but ignored");
+ * TODO process streamdir `baddies', i.e. files we don't
+ * want if R/W
+ */
+ }
+ }
+
+ /* release the vnodes again; they'll be auto-recycled later */
+ if (streamdir_node) {
+ vput(streamdir_node);
+ }
+ if (rootdir_node) {
+ vput(rootdir_node);
+ }
+
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+/* To make absolutely sure we are NOT returning zero, add one :) */
+
+long
+udf_get_node_id(const struct long_ad *icbptr)
+{
+ /* this can fail, but in final version should never fail. */
+ uint32_t blkn, ino;
+ uint16_t part;
+
+ /* Just for now, this should be done in another way. */
+ blkn = le32toh(icbptr->loc.lb_num);
+ part = le16toh(icbptr->loc.part_num);
+
+ if ((blkn + 1) & 0xE0000000) {
+ printf("block number too large to convert to inode number.\n");
+ }
+ if (part & 0xFFF8) {
+ printf("partition number too large to convert to inode number.\n");
+ }
+
+ ino = (blkn + 1) | (part << 29);
+ return (ino);
+}
+
+int
+udf_get_node_longad(const ino_t ino, struct long_ad *icbptr)
+{
+ uint32_t blkn, ino2;
+ uint16_t part;
+
+ /* Just for now, this should be done in another way. */
+ ino2 = ino;
+ blkn = (ino2 & 0x1FFFFFFF) - 1;
+ part = (ino2 & 0xE0000000) >> 29;
+
+ icbptr->loc.lb_num = htole32(blkn);
+ icbptr->loc.part_num = htole16(part);
+
+ return (0);
+}
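+
+/*
+ * Sketch of the in-memory encoding used by the two helpers above (it
+ * is not part of the on-disc format): the inode number packs the
+ * partition number into the top 3 bits and the logical block number
+ * plus one into the lower 29 bits. For example, lb_num 0x1000 on
+ * partition 1 becomes ino (0x1000 + 1) | (1 << 29) = 0x20001001, and
+ * udf_get_node_longad() recovers lb_num and part_num by masking and
+ * shifting those fields back out.
+ */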
+
+#if 0
+int
+udf_compare_icb(const struct long_ad *a, const struct long_ad *b)
+{
+ if (le16toh(a->loc.part_num) < le16toh(b->loc.part_num))
+ return (-1);
+ if (le16toh(a->loc.part_num) > le16toh(b->loc.part_num))
+ return (1);
+
+ if (le32toh(a->loc.lb_num) < le32toh(b->loc.lb_num))
+ return (-1);
+ if (le32toh(a->loc.lb_num) > le32toh(b->loc.lb_num))
+ return (1);
+
+ return (0);
+}
+
+
+static int
+udf_compare_rbnodes(const struct rb_node *a, const struct rb_node *b)
+{
+ struct udf_node *a_node = RBTOUDFNODE(a);
+ struct udf_node *b_node = RBTOUDFNODE(b);
+
+ return (udf_compare_icb(&a_node->loc, &b_node->loc));
+}
+
+
+static int
+udf_compare_rbnode_icb(const struct rb_node *a, const void *key)
+{
+ struct udf_node *a_node = RBTOUDFNODE(a);
+ const struct long_ad * const icb = key;
+
+ return (udf_compare_icb(&a_node->loc, icb));
+}
+
+
+static const struct rb_tree_ops udf_node_rbtree_ops = {
+ .rbto_compare_nodes = udf_compare_rbnodes,
+ .rbto_compare_key = udf_compare_rbnode_icb,
+};
+
+
+void
+udf_init_nodes_tree(struct udf_mount *ump)
+{
+ rb_tree_init(&ump->udf_node_tree, &udf_node_rbtree_ops);
+}
+
+
+static struct udf_node *
+udf_node_lookup(struct udf_mount *ump, struct long_ad *icbptr)
+{
+ struct rb_node *rb_node;
+ struct udf_node *udf_node;
+ struct vnode *vp;
+
+loop:
+ mutex_enter(&ump->ihash_lock);
+
+ rb_node = rb_tree_find_node(&ump->udf_node_tree, icbptr);
+ if (rb_node) {
+ udf_node = RBTOUDFNODE(rb_node);
+ vp = udf_node->vnode;
+ assert(vp);
+ mutex_enter(&vp->v_interlock);
+ mutex_exit(&ump->ihash_lock);
+ if (vget(vp, LK_EXCLUSIVE))
+ goto loop;
+ return (udf_node);
+ }
+ mutex_exit(&ump->ihash_lock);
+
+ return (NULL);
+}
+
+
+static void
+udf_register_node(struct udf_node *udf_node)
+{
+ struct udf_mount *ump = udf_node->ump;
+
+ /* add node to the rb tree */
+ mutex_enter(&ump->ihash_lock);
+ rb_tree_insert_node(&ump->udf_node_tree, &udf_node->rbnode);
+ mutex_exit(&ump->ihash_lock);
+}
+
+
+static void
+udf_deregister_node(struct udf_node *udf_node)
+{
+ struct udf_mount *ump = udf_node->ump;
+
+ /* remove node from the rb tree */
+ mutex_enter(&ump->ihash_lock);
+ rb_tree_remove_node(&ump->udf_node_tree, &udf_node->rbnode);
+ mutex_exit(&ump->ihash_lock);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_validate_session_start(struct udf_mount *ump)
+{
+ struct mmc_trackinfo trackinfo;
+ struct vrs_desc *vrs;
+ uint32_t tracknr, sessionnr, sector, sector_size;
+ uint32_t iso9660_vrs, write_track_start;
+ uint8_t *buffer, *blank, *pos;
+ int blks, max_sectors, vrs_len;
+ int error;
+
+ /* disc appendable? */
+ if (ump->discinfo.disc_state == MMC_STATE_FULL)
+ return (EROFS);
+
+ /* already written here? if so, there should be an ISO VDS */
+ if (ump->discinfo.last_session_state == MMC_STATE_INCOMPLETE)
+ return (0);
+
+ /*
+ * Check if the first track of the session is blank and if so, copy or
+ * create a dummy ISO descriptor so the disc is valid again.
+ */
+
+ tracknr = ump->discinfo.first_track_last_session;
+ memset(&trackinfo, 0, sizeof(struct mmc_trackinfo));
+ trackinfo.tracknr = tracknr;
+ error = udf_update_trackinfo(ump, &trackinfo);
+ if (error)
+ return (error);
+
+ udf_dump_trackinfo(&trackinfo);
+ KASSERT(trackinfo.flags & (MMC_TRACKINFO_BLANK | MMC_TRACKINFO_RESERVED));
+ KASSERT(trackinfo.sessionnr > 1);
+
+ KASSERT(trackinfo.flags & MMC_TRACKINFO_NWA_VALID);
+ write_track_start = trackinfo.next_writable;
+
+ /* we have to copy the ISO VRS from a former session */
+
+ /* sessionnr should be the session we're mounting */
+ sessionnr = ump->mount_args.sessionnr;
+
+ /* start at the first track */
+ tracknr = ump->discinfo.first_track;
+ while (tracknr <= ump->discinfo.num_tracks) {
+ trackinfo.tracknr = tracknr;
+ error = udf_update_trackinfo(ump, &trackinfo);
+ if (error)
+ return (error);
+ if (trackinfo.sessionnr == sessionnr)
+ break;
+ tracknr++;
+ }
+ if (trackinfo.sessionnr != sessionnr)
+ return (ENOENT);
+
+ udf_dump_trackinfo(&trackinfo);
+
+ /*
+ * The location of the iso9660 VRS is defined as the first sector
+ * AFTER 32kb, with a minimum ISO `sector size' of 2048.
+ */
+ sector_size = ump->discinfo.sector_size;
+ iso9660_vrs = ((32*1024 + sector_size - 1) / sector_size)
+ + trackinfo.track_start;
+
+ buffer = malloc(UDF_ISO_VRS_SIZE, M_UDFTEMP, M_WAITOK);
+ max_sectors = UDF_ISO_VRS_SIZE / sector_size;
+ blks = MAX(1, 2048 / sector_size);
+
+ error = 0;
+ for (sector = 0; sector < max_sectors; sector += blks) {
+ pos = buffer + sector * sector_size;
+ error = udf_read_phys_sectors(ump, UDF_C_DSCR, pos,
+ iso9660_vrs + sector, blks);
+ if (error)
+ break;
+ /* check this ISO descriptor */
+ vrs = (struct vrs_desc *) pos;
+ if (strncmp(vrs->identifier, VRS_CD001, 5) == 0)
+ continue;
+ if (strncmp(vrs->identifier, VRS_CDW02, 5) == 0)
+ continue;
+ if (strncmp(vrs->identifier, VRS_BEA01, 5) == 0)
+ continue;
+ if (strncmp(vrs->identifier, VRS_NSR02, 5) == 0)
+ continue;
+ if (strncmp(vrs->identifier, VRS_NSR03, 5) == 0)
+ continue;
+ if (strncmp(vrs->identifier, VRS_TEA01, 5) == 0)
+ break;
+ /* now what? for now, end of sequence */
+ break;
+ }
+ vrs_len = sector + blks;
+ if (error) {
+ memset(buffer, 0, UDF_ISO_VRS_SIZE);
+
+ vrs = (struct vrs_desc *) (buffer);
+ vrs->struct_type = 0;
+ vrs->version = 1;
+ memcpy(vrs->identifier,VRS_BEA01, 5);
+
+ vrs = (struct vrs_desc *) (buffer + 2048);
+ vrs->struct_type = 0;
+ vrs->version = 1;
+ if (le16toh(ump->logical_vol->tag.descriptor_ver) == 2) {
+ memcpy(vrs->identifier,VRS_NSR02, 5);
+ } else {
+ memcpy(vrs->identifier,VRS_NSR03, 5);
+ }
+
+ vrs = (struct vrs_desc *) (buffer + 4096);
+ vrs->struct_type = 0;
+ vrs->version = 1;
+ memcpy(vrs->identifier, VRS_TEA01, 5);
+
+ vrs_len = 3*blks;
+ }
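+ /*
+ * The fallback above builds a minimal Volume Recognition Sequence:
+ * a BEA01 descriptor, an NSR02 or NSR03 descriptor matching the
+ * logical volume's descriptor version, and a TEA01 terminator, each
+ * placed at a 2048-byte offset (the fixed VRS descriptor size).
+ */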
+
+ /*
+ * The location of the iso9660 VRS is defined as the first sector
+ * AFTER 32kb, with a minimum ISO `sector size' of 2048.
+ */
+ sector_size = ump->discinfo.sector_size;
+ iso9660_vrs = ((32*1024 + sector_size - 1) / sector_size)
+ + write_track_start;
+
+ /* write out 32 kb */
+ blank = malloc(sector_size, M_UDFTEMP, M_WAITOK);
+ memset(blank, 0, sector_size);
+ error = 0;
+ for (sector = write_track_start; sector < iso9660_vrs; sector ++) {
+ error = udf_write_phys_sectors(ump, UDF_C_ABSOLUTE,
+ blank, sector, 1);
+ if (error)
+ break;
+ }
+ if (!error) {
+ /* write out our ISO VRS */
+ KASSERT(sector == iso9660_vrs);
+ error = udf_write_phys_sectors(ump, UDF_C_ABSOLUTE, buffer,
+ sector, vrs_len);
+ sector += vrs_len;
+ }
+ if (!error) {
+ /* fill up to the first anchor at S+256 */
+ for (; sector < write_track_start+256; sector++) {
+ error = udf_write_phys_sectors(ump, UDF_C_ABSOLUTE,
+ blank, sector, 1);
+ if (error)
+ break;
+ }
+ }
+ if (!error) {
+ /* write out anchor; write at ABSOLUTE place! */
+ error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_ABSOLUTE,
+ (union dscrptr *) ump->anchors[0], sector, sector);
+ if (error)
+ printf("writeout of anchor failed!\n");
+ }
+
+ free(blank, M_UDFTEMP);
+ free(buffer, M_UDFTEMP);
+
+ if (error)
+ printf("udf_open_session: error writing iso vrs! : "
+ "leaving disc in compromised state!\n");
+
+ /* synchronise device caches */
+ (void) udf_synchronise_caches(ump);
+
+ return (error);
+}
+
+
+int
+udf_open_logvol(struct udf_mount *ump)
+{
+ int logvol_integrity;
+ int error;
+
+ /* already/still open? */
+ logvol_integrity = le32toh(ump->logvol_integrity->integrity_type);
+ if (logvol_integrity == UDF_INTEGRITY_OPEN)
+ return (0);
+
+ /* can we open it ? */
+ if (ump->vfs_mountp->mnt_flag & MNT_RDONLY)
+ return (EROFS);
+
+ /* setup write parameters */
+ if ((error = udf_setup_writeparams(ump)) != 0)
+ return (error);
+
+ /* determine data and metadata tracks (most likely same) */
+ error = udf_search_writing_tracks(ump);
+ if (error) {
+ /* most likely lack of space */
+ printf("udf_open_logvol: error searching writing tracks\n");
+ return (EROFS);
+ }
+
+ /* writeout/update lvint on disc or only in memory */
+ if (ump->lvopen & UDF_OPEN_SESSION) {
+ /* TODO optional track reservation opening */
+ error = udf_validate_session_start(ump);
+ if (error)
+ return (error);
+
+ /* determine data and metadata tracks again */
+ error = udf_search_writing_tracks(ump);
+ }
+
+ /* mark it open */
+ ump->logvol_integrity->integrity_type = htole32(UDF_INTEGRITY_OPEN);
+
+ /* do we need to write it out? */
+ if (ump->lvopen & UDF_WRITE_LVINT) {
+ error = udf_writeout_lvint(ump, ump->lvopen);
+ /* if we couldn't write it mark it closed again */
+ if (error) {
+ ump->logvol_integrity->integrity_type =
+ htole32(UDF_INTEGRITY_CLOSED);
+ return (error);
+ }
+ }
+
+ return (0);
+}
+#endif
+
+#if 0
+int
+udf_close_logvol(struct udf_mount *ump, int mntflags)
+{
+ struct vnode *devvp = ump->devvp;
+ struct mmc_op mmc_op;
+ int logvol_integrity;
+ int error = 0, error1 = 0, error2 = 0;
+ int tracknr;
+ int nvats, n, nok;
+
+ /* already/still closed? */
+ logvol_integrity = le32toh(ump->logvol_integrity->integrity_type);
+ if (logvol_integrity == UDF_INTEGRITY_CLOSED)
+ return (0);
+
+ /* writeout/update lvint or write out VAT */
+ DPRINTF(VOLUMES, ("udf_close_logvol: closing logical volume\n"));
+#ifdef DIAGNOSTIC
+ if (ump->lvclose & UDF_CLOSE_SESSION)
+ KASSERT(ump->lvclose & UDF_WRITE_VAT);
+#endif
+
+ if (ump->lvclose & UDF_WRITE_VAT) {
+ DPRINTF(VOLUMES, ("lvclose & UDF_WRITE_VAT\n"));
+
+ /* write out the VAT data and all its descriptors */
+ DPRINTF(VOLUMES, ("writeout vat_node\n"));
+ udf_writeout_vat(ump);
+ vflushbuf(ump->vat_node->vnode, 1 /* sync */);
+
+ (void) VOP_FSYNC(ump->vat_node->vnode,
+ FSCRED, FSYNC_WAIT, 0, 0);
+
+ if (ump->lvclose & UDF_CLOSE_SESSION) {
+ DPRINTF(VOLUMES, ("udf_close_logvol: closing session "
+ "as requested\n"));
+ }
+
+ /* at least two DVD packets and 3 CD-R packets */
+ nvats = 32;
+
+#if notyet
+ /*
+ * TODO calculate the available space and if the disc is
+ * almost full, write out till end-256-1 with banks, write
+ * AVDP and fill up with VATs, then close session and close
+ * disc.
+ */
+ if (ump->lvclose & UDF_FINALISE_DISC) {
+ error = udf_write_phys_dscr_sync(ump, NULL,
+ UDF_C_FLOAT_DSCR,
+ (union dscrptr *) ump->anchors[0],
+ 0, 0);
+ if (error)
+ printf("writeout of anchor failed!\n");
+
+ /* pad space with VAT ICBs */
+ nvats = 256;
+ }
+#endif
+
+ /* write out a number of VAT nodes */
+ nok = 0;
+ for (n = 0; n < nvats; n++) {
+ /* will now only write last FE/EFE */
+ ump->vat_node->i_flags |= IN_MODIFIED;
+ error = VOP_FSYNC(ump->vat_node->vnode,
+ FSCRED, FSYNC_WAIT, 0, 0);
+ if (!error)
+ nok++;
+ }
+ if (nok < 14) {
+ /* arbitrary; but at least one or two CD frames */
+ printf("writeout of at least 14 VATs failed\n");
+ return (error);
+ }
+ }
+
+ /* NOTE the disc is in a (minimal) valid state now; no erroring out */
+
+ /* finish closing of session */
+ if (ump->lvclose & UDF_CLOSE_SESSION) {
+ error = udf_validate_session_start(ump);
+ if (error)
+ return (error);
+
+ (void) udf_synchronise_caches(ump);
+
+ /* close all associated tracks */
+ tracknr = ump->discinfo.first_track_last_session;
+ error = 0;
+ while (tracknr <= ump->discinfo.last_track_last_session) {
+ DPRINTF(VOLUMES, ("\tclosing possible open "
+ "track %d\n", tracknr));
+ memset(&mmc_op, 0, sizeof(mmc_op));
+ mmc_op.operation = MMC_OP_CLOSETRACK;
+ mmc_op.mmc_profile = ump->discinfo.mmc_profile;
+ mmc_op.tracknr = tracknr;
+ error = VOP_IOCTL(devvp, MMCOP, &mmc_op,
+ FKIOCTL, NOCRED);
+ if (error)
+ printf("udf_close_logvol: closing of "
+ "track %d failed\n", tracknr);
+ tracknr ++;
+ }
+ if (!error) {
+ DPRINTF(VOLUMES, ("closing session\n"));
+ memset(&mmc_op, 0, sizeof(mmc_op));
+ mmc_op.operation = MMC_OP_CLOSESESSION;
+ mmc_op.mmc_profile = ump->discinfo.mmc_profile;
+ mmc_op.sessionnr = ump->discinfo.num_sessions;
+ error = VOP_IOCTL(devvp, MMCOP, &mmc_op,
+ FKIOCTL, NOCRED);
+ if (error)
+ printf("udf_close_logvol: closing of session"
+ "failed\n");
+ }
+ if (!error)
+ ump->lvopen |= UDF_OPEN_SESSION;
+ if (error) {
+ printf("udf_close_logvol: leaving disc as it is\n");
+ ump->lvclose &= ~UDF_FINALISE_DISC;
+ }
+ }
+
+ if (ump->lvclose & UDF_FINALISE_DISC) {
+ memset(&mmc_op, 0, sizeof(mmc_op));
+ mmc_op.operation = MMC_OP_FINALISEDISC;
+ mmc_op.mmc_profile = ump->discinfo.mmc_profile;
+ mmc_op.sessionnr = ump->discinfo.num_sessions;
+ error = VOP_IOCTL(devvp, MMCOP, &mmc_op,
+ FKIOCTL, NOCRED);
+ if (error)
+ printf("udf_close_logvol: finalising disc"
+ "failed\n");
+ }
+
+ /* write out partition bitmaps if requested */
+ if (ump->lvclose & UDF_WRITE_PART_BITMAPS) {
+ /* sync writeout metadata spacetable if existing */
+ error1 = udf_write_metadata_partition_spacetable(ump, true);
+ if (error1)
+ printf( "udf_close_logvol: writeout of metadata space "
+ "bitmap failed\n");
+
+ /* sync writeout partition spacetables */
+ error2 = udf_write_physical_partition_spacetables(ump, true);
+ if (error2)
+ printf( "udf_close_logvol: writeout of space tables "
+ "failed\n");
+
+ if (error1 || error2)
+ return (error1 | error2);
+
+ ump->lvclose &= ~UDF_WRITE_PART_BITMAPS;
+ }
+
+ /* write out metadata partition nodes if requested */
+ if (ump->lvclose & UDF_WRITE_METAPART_NODES) {
+ /* sync writeout metadata descriptor node */
+ error1 = udf_writeout_node(ump->metadata_node, FSYNC_WAIT);
+ if (error1)
+ printf( "udf_close_logvol: writeout of metadata partition "
+ "node failed\n");
+
+ /* duplicate metadata partition descriptor if needed */
+ udf_synchronise_metadatamirror_node(ump);
+
+ /* sync writeout metadatamirror descriptor node */
+ error2 = udf_writeout_node(ump->metadatamirror_node, FSYNC_WAIT);
+ if (error2)
+ printf( "udf_close_logvol: writeout of metadata partition "
+ "mirror node failed\n");
+
+ if (error1 || error2)
+ return (error1 | error2);
+
+ ump->lvclose &= ~UDF_WRITE_METAPART_NODES;
+ }
+
+ /* mark it closed */
+ ump->logvol_integrity->integrity_type = htole32(UDF_INTEGRITY_CLOSED);
+
+ /* do we need to write out the logical volume integrity? */
+ if (ump->lvclose & UDF_WRITE_LVINT)
+ error = udf_writeout_lvint(ump, ump->lvopen);
+ if (error) {
+ /* HELP now what? mark it open again for now */
+ ump->logvol_integrity->integrity_type =
+ htole32(UDF_INTEGRITY_OPEN);
+ return (error);
+ }
+
+ (void) udf_synchronise_caches(ump);
+
+ return (0);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Genfs interfacing
+ *
+ * static const struct genfs_ops udf_genfsops = {
+ * .gop_size = genfs_size,
+ * size of transfers
+ * .gop_alloc = udf_gop_alloc,
+ * allocate len bytes at offset
+ * .gop_write = genfs_gop_write,
+ * putpages interface code
+ * .gop_markupdate = udf_gop_markupdate,
+ * set update/modify flags etc.
+ * }
+ */
+
+/*
+ * Genfs interface. These four functions are the only ones defined though not
+ * documented... great....
+ */
+
+/*
+ * Called for allocating an extent of the file either by VOP_WRITE() or by
+ * genfs filling up gaps.
+ */
+#if 0
+static int
+udf_gop_alloc(struct vnode *vp, off_t off,
+ off_t len, int flags, kauth_cred_t cred)
+{
+ struct udf_node *udf_node = VTOI(vp);
+ struct udf_mount *ump = udf_node->ump;
+ uint64_t lb_start, lb_end;
+ uint32_t lb_size, num_lb;
+ int udf_c_type, vpart_num, can_fail;
+ int error;
+
+ DPRINTF(ALLOC, ("udf_gop_alloc called for offset %"PRIu64" for %"PRIu64" bytes, %s\n",
+ off, len, flags? "SYNC":"NONE"));
+
+ /*
+ * request the pages of our vnode and see how many pages will need to
+ * be allocated and reserve that space
+ */
+ lb_size = le32toh(udf_node->ump->logical_vol->lb_size);
+ lb_start = off / lb_size;
+ lb_end = (off + len + lb_size -1) / lb_size;
+ num_lb = lb_end - lb_start;
+
+ udf_c_type = udf_get_c_type(udf_node);
+ vpart_num = udf_get_record_vpart(ump, udf_c_type);
+
+ /* all requests can fail */
+ can_fail = true;
+
+ /* fid's (directories) can't fail */
+ if (udf_c_type == UDF_C_FIDS)
+ can_fail = false;
+
+ /* system files can't fail */
+ if (vp->v_vflag & VV_SYSTEM)
+ can_fail = false;
+
+ error = udf_reserve_space(ump, udf_node, udf_c_type,
+ vpart_num, num_lb, can_fail);
+
+ DPRINTF(ALLOC, ("\tlb_start %"PRIu64", lb_end %"PRIu64", num_lb %d\n",
+ lb_start, lb_end, num_lb));
+
+ return (error);
+}
+
+
+/*
+ * callback from genfs to update our flags
+ */
+static void
+udf_gop_markupdate(struct vnode *vp, int flags)
+{
+ struct udf_node *udf_node = VTOI(vp);
+ u_long mask = 0;
+
+ if ((flags & GOP_UPDATE_ACCESSED) != 0) {
+ mask = IN_ACCESS;
+ }
+ if ((flags & GOP_UPDATE_MODIFIED) != 0) {
+ if (vp->v_type == VREG) {
+ mask |= IN_CHANGE | IN_UPDATE;
+ } else {
+ mask |= IN_MODIFY;
+ }
+ }
+ if (mask) {
+ udf_node->i_flags |= mask;
+ }
+}
+
+
+static const struct genfs_ops udf_genfsops = {
+ .gop_size = genfs_size,
+ .gop_alloc = udf_gop_alloc,
+ .gop_write = genfs_gop_write_rwmap,
+ .gop_markupdate = udf_gop_markupdate,
+};
+
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_write_terminator(struct udf_mount *ump, uint32_t sector)
+{
+ union dscrptr *dscr;
+ int error;
+
+ dscr = malloc(ump->discinfo.sector_size, M_UDFTEMP, M_WAITOK|M_ZERO);
+ udf_inittag(ump, &dscr->tag, TAGID_TERM, sector);
+
+ /* CRC length for an anchor is 512 - tag length; defined in Ecma 167 */
+ dscr->tag.desc_crc_len = le16toh(512-UDF_DESC_TAG_LENGTH);
+ (void) udf_validate_tag_and_crc_sums(dscr);
+
+ error = udf_write_phys_dscr_sync(ump, NULL, UDF_C_DSCR,
+ dscr, sector, sector);
+
+ free(dscr, M_UDFTEMP);
+
+ return (error);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/* UDF<->unix converters */
+
+/* --------------------------------------------------------------------- */
+
+static mode_t
+udf_perm_to_unix_mode(uint32_t perm)
+{
+ mode_t mode;
+
+ mode = ((perm & UDF_FENTRY_PERM_USER_MASK) );
+ mode |= ((perm & UDF_FENTRY_PERM_GRP_MASK ) >> 2);
+ mode |= ((perm & UDF_FENTRY_PERM_OWNER_MASK) >> 4);
+
+ return (mode);
+}
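+
+/*
+ * Sketch of the mapping above: ECMA-167 stores five permission bits per
+ * class (other in bits 0-4, group in bits 5-9, owner in bits 10-14)
+ * while POSIX uses three, so shifting the group and owner fields down
+ * by 2 and 4 lines their r/w/x bits up with the Unix 0777 layout. This
+ * assumes the UDF_FENTRY_PERM_*_MASK macros select only the
+ * read/write/execute bits of each class.
+ */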
+
+/* --------------------------------------------------------------------- */
+#if 0
+static uint32_t
+unix_mode_to_udf_perm(mode_t mode)
+{
+ uint32_t perm;
+
+ perm = ((mode & S_IRWXO) );
+ perm |= ((mode & S_IRWXG) << 2);
+ perm |= ((mode & S_IRWXU) << 4);
+ perm |= ((mode & S_IWOTH) << 3);
+ perm |= ((mode & S_IWGRP) << 5);
+ perm |= ((mode & S_IWUSR) << 7);
+
+ return (perm);
+}
+#endif
+/* --------------------------------------------------------------------- */
+
+static uint32_t
+udf_icb_to_unix_filetype(uint32_t icbftype)
+{
+ switch (icbftype) {
+ case UDF_ICB_FILETYPE_DIRECTORY :
+ case UDF_ICB_FILETYPE_STREAMDIR :
+ return (S_IFDIR);
+ case UDF_ICB_FILETYPE_FIFO :
+ return (S_IFIFO);
+ case UDF_ICB_FILETYPE_CHARDEVICE :
+ return (S_IFCHR);
+ case UDF_ICB_FILETYPE_BLOCKDEVICE :
+ return (S_IFBLK);
+ case UDF_ICB_FILETYPE_RANDOMACCESS :
+ case UDF_ICB_FILETYPE_REALTIME :
+ return (S_IFREG);
+ case UDF_ICB_FILETYPE_SYMLINK :
+ return (S_IFLNK);
+ case UDF_ICB_FILETYPE_SOCKET :
+ return (S_IFSOCK);
+ }
+ /* no idea what this is */
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+/* These timestamp_to_timespec functions are done. */
+static const int days_to_mon[12] = {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334};
+
+static int
+udf_leapyear(int year)
+{
+ int i;
+
+ i = (year % 400 == 0) ? 1 : 0;
+ i |= (year % 100 == 0) ? 0 : 1;
+ i &= (year % 4 == 0) ? 1 : 0;
+
+ return (i);
+}
+
+void
+udf_timestamp_to_timespec(struct udf_mount *ump,
+ struct timestamp *timestamp,
+ struct timespec *timespec)
+{
+ time_t secs;
+ uint32_t usecs, nsecs;
+ uint16_t tz, year;
+
+ year = le16toh(timestamp->year);
+ if (year < 1970 || timestamp->month < 1 || timestamp->month > 12) {
+ timespec->tv_sec = 0;
+ timespec->tv_nsec = 0;
+ return;
+ }
+
+ secs = timestamp->second;
+ secs += timestamp->minute * 60;
+ secs += timestamp->hour * 3600;
+ secs += (timestamp->day - 1) * 3600 * 24;
+ secs += days_to_mon[timestamp->month - 1] * 3600 * 24;
+
+ secs += (year - 1970) * 365 * 3600 * 24;
+ secs += ((year - 1 - 1968) / 4) * 3600 * 24;
+
+ if (year > 2100) {
+ secs -= (((year - 1 - 2100) / 100) + 1) * 3600 * 24;
+ }
+ if (year > 2400) {
+ secs += (((year - 1 - 2400) / 400) + 1) * 3600 * 24;
+ }
+ if (timestamp->month > 2) {
+ secs += (time_t)udf_leapyear(year) * 3600 * 24;
+ }
+
+ usecs = timestamp->usec +
+ 100*timestamp->hund_usec + 10000*timestamp->centisec;
+ nsecs = usecs * 1000;
+
+ /*
+ * Calculate the time zone. The time zone is a 12-bit signed two's
+ * complement value, so some extra care is needed to handle it right.
+ */
+ tz = le16toh(timestamp->type_tz);
+ tz &= 0x0fff; /* only lower 12 bits are significant */
+ if (tz & 0x0800) /* sign extension */
+ tz |= 0xf000;
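+ /*
+ * Example: an offset of -300 minutes (UTC-05:00) is stored as the
+ * 12-bit two's complement value 0xed4; the sign extension above
+ * turns it into 0xfed4, which reads back as -300 through the
+ * int16_t casts below.
+ */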
+
+ /*
+ * TODO check timezone conversion
+ * check if we are specified a timezone to convert
+ */
+ if (le16toh(timestamp->type_tz) & 0x1000) {
+ if ((int16_t) tz != -2047)
+ secs -= (int16_t) tz * 60;
+ } /* else {
+ secs -= ump->mount_args.gmtoff;
+ } */
+
+ timespec->tv_sec = secs;
+ timespec->tv_nsec = nsecs;
+
+ return;
+}
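+
+/*
+ * Worked example of the day arithmetic above (a sanity check, not a
+ * normative reference): for 2024-01-01 00:00:00 the year term gives
+ * (2024 - 1970) * 365 = 19710 days, the leap term adds
+ * (2024 - 1 - 1968) / 4 = 13 days, and the century corrections do not
+ * kick in below 2100, for a total of 19723 days, which matches the
+ * Unix epoch day number for that date. The leap year helper above
+ * likewise reports 2000 and 2004 as leap years but not 1900.
+ */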
+#if 0
+
+void
+udf_timespec_to_timestamp(struct timespec *timespec, struct timestamp *timestamp)
+{
+ struct clock_ymdhms ymdhms;
+ uint32_t husec, usec, csec;
+
+ (void) clock_secs_to_ymdhms(timespec->tv_sec, &ymdhms);
+
+ usec = timespec->tv_nsec / 1000;
+ husec = usec / 100;
+ usec -= husec * 100; /* only 0-99 in usec */
+ csec = husec / 100; /* only 0-99 in csec */
+ husec -= csec * 100; /* only 0-99 in husec */
+
+ /* set method 1 for CUT/GMT */
+ timestamp->type_tz = htole16((1<<12) + 0);
+ timestamp->year = htole16(ymdhms.dt_year);
+ timestamp->month = ymdhms.dt_mon;
+ timestamp->day = ymdhms.dt_day;
+ timestamp->hour = ymdhms.dt_hour;
+ timestamp->minute = ymdhms.dt_min;
+ timestamp->second = ymdhms.dt_sec;
+ timestamp->centisec = csec;
+ timestamp->hund_usec = husec;
+ timestamp->usec = usec;
+}
+#endif
+/* --------------------------------------------------------------------- */
+
+/*
+ * Attribute and filetypes converters with get/set pairs
+ */
+
+uint32_t
+udf_getaccessmode(struct udf_node *udf_node)
+{
+ struct file_entry *fe = udf_node->fe;
+ struct extfile_entry *efe = udf_node->efe;
+ uint32_t udf_perm, icbftype, mode, ftype;
+ uint16_t icbflags;
+
+/* UDF_LOCK_NODE(udf_node, 0); */
+ if (fe) {
+ udf_perm = le32toh(fe->perm);
+ icbftype = fe->icbtag.file_type;
+ icbflags = le16toh(fe->icbtag.flags);
+ } else {
+ KASSERT(udf_node->efe, ("Extended File Entry is null"));
+ udf_perm = le32toh(efe->perm);
+ icbftype = efe->icbtag.file_type;
+ icbflags = le16toh(efe->icbtag.flags);
+ }
+
+ mode = udf_perm_to_unix_mode(udf_perm);
+ ftype = udf_icb_to_unix_filetype(icbftype);
+
+ /* set suid, sgid, sticky from flags in fe/efe */
+ if (icbflags & UDF_ICB_TAG_FLAGS_SETUID)
+ mode |= S_ISUID;
+ if (icbflags & UDF_ICB_TAG_FLAGS_SETGID)
+ mode |= S_ISGID;
+ if (icbflags & UDF_ICB_TAG_FLAGS_STICKY)
+ mode |= S_ISVTX;
+
+/* UDF_UNLOCK_NODE(udf_node, 0); */
+
+ return (mode | ftype);
+}
+
+#if 0
+void
+udf_setaccessmode(struct udf_node *udf_node, mode_t mode)
+{
+ struct file_entry *fe = udf_node->fe;
+ struct extfile_entry *efe = udf_node->efe;
+ uint32_t udf_perm;
+ uint16_t icbflags;
+
+ UDF_LOCK_NODE(udf_node, 0);
+ udf_perm = unix_mode_to_udf_perm(mode & ALLPERMS);
+ if (fe) {
+ icbflags = le16toh(fe->icbtag.flags);
+ } else {
+ icbflags = le16toh(efe->icbtag.flags);
+ }
+
+ icbflags &= ~UDF_ICB_TAG_FLAGS_SETUID;
+ icbflags &= ~UDF_ICB_TAG_FLAGS_SETGID;
+ icbflags &= ~UDF_ICB_TAG_FLAGS_STICKY;
+ if (mode & S_ISUID)
+ icbflags |= UDF_ICB_TAG_FLAGS_SETUID;
+ if (mode & S_ISGID)
+ icbflags |= UDF_ICB_TAG_FLAGS_SETGID;
+ if (mode & S_ISVTX)
+ icbflags |= UDF_ICB_TAG_FLAGS_STICKY;
+
+ if (fe) {
+ fe->perm = htole32(udf_perm);
+ fe->icbtag.flags = htole16(icbflags);
+ } else {
+ efe->perm = htole32(udf_perm);
+ efe->icbtag.flags = htole16(icbflags);
+ }
+
+ UDF_UNLOCK_NODE(udf_node, 0);
+}
+
+
+void
+udf_getownership(struct udf_node *udf_node, uid_t *uidp, gid_t *gidp)
+{
+ struct udf_mount *ump = udf_node->ump;
+ struct file_entry *fe = udf_node->fe;
+ struct extfile_entry *efe = udf_node->efe;
+ uid_t uid;
+ gid_t gid;
+
+ UDF_LOCK_NODE(udf_node, 0);
+ if (fe) {
+ uid = (uid_t)le32toh(fe->uid);
+ gid = (gid_t)le32toh(fe->gid);
+ } else {
+ assert(udf_node->efe);
+ uid = (uid_t)le32toh(efe->uid);
+ gid = (gid_t)le32toh(efe->gid);
+ }
+
+ /* do the uid/gid translation game */
+ if (uid == (uid_t) -1)
+ uid = ump->mount_args.anon_uid;
+ if (gid == (gid_t) -1)
+ gid = ump->mount_args.anon_gid;
+
+ *uidp = uid;
+ *gidp = gid;
+
+ UDF_UNLOCK_NODE(udf_node, 0);
+}
+
+
+void
+udf_setownership(struct udf_node *udf_node, uid_t uid, gid_t gid)
+{
+ struct udf_mount *ump = udf_node->ump;
+ struct file_entry *fe = udf_node->fe;
+ struct extfile_entry *efe = udf_node->efe;
+ uid_t nobody_uid;
+ gid_t nobody_gid;
+
+ UDF_LOCK_NODE(udf_node, 0);
+
+ /* do the uid/gid translation game */
+ nobody_uid = ump->mount_args.nobody_uid;
+ nobody_gid = ump->mount_args.nobody_gid;
+ if (uid == nobody_uid)
+ uid = (uid_t) -1;
+ if (gid == nobody_gid)
+ gid = (gid_t) -1;
+
+ if (fe) {
+ fe->uid = htole32((uint32_t) uid);
+ fe->gid = htole32((uint32_t) gid);
+ } else {
+ efe->uid = htole32((uint32_t) uid);
+ efe->gid = htole32((uint32_t) gid);
+ }
+
+ UDF_UNLOCK_NODE(udf_node, 0);
+}
+
+
+/* --------------------------------------------------------------------- */
+
+
+static int
+dirhash_fill(struct udf_node *dir_node)
+{
+ struct vnode *dvp = dir_node->vnode;
+ struct dirhash *dirh;
+ struct file_entry *fe = dir_node->fe;
+ struct extfile_entry *efe = dir_node->efe;
+ struct fileid_desc *fid;
+ struct dirent *dirent;
+ uint64_t file_size, pre_diroffset, diroffset = 0;
+ uint32_t lb_size;
+ int error = 0;
+
+ /* make sure we have a dirhash to work on */
+ dirh = dir_node->dir_hash;
+ KASSERT(dirh, ("dirhash_fill: dirhash is null"));
+ KASSERT(dirh->dn_refcount > 0, ("dirhash_fill: dirhash does not have reference"));
+
+#if 0
+ if (dirh->flags & DIRH_BROKEN)
+ return (EIO);
+ if (dirh->flags & DIRH_COMPLETE)
+ return (0);
+
+ /* make sure we have a clean dirhash to add to */
+ dirhash_purge_entries(dirh);
+#endif
+
+ /* get directory filesize */
+ if (fe) {
+ file_size = le64toh(fe->inf_len);
+ } else {
+ KASSERT(dir_node->efe, ("Extended File Entry is null"));
+ file_size = le64toh(efe->inf_len);
+ }
+
+ /* allocate temporary space for fid */
+ lb_size = le32toh(dir_node->ump->logical_vol->lb_size);
+ fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
+
+ /* allocate temporary space for dirent */
+ dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
+
+ while (diroffset < file_size) {
+ /* transfer a new fid/dirent */
+ pre_diroffset = diroffset;
+ error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
+ if (error) {
+ /* TODO what to do? continue but not add? */
+ dirh->flags |= DIRH_BROKEN;
+ dirhash_purge_entries(dirh);
+ break;
+ }
+
+ if ((fid->file_char & UDF_FILE_CHAR_DEL)) {
+ /* register deleted extent for reuse */
+ dirhash_enter_freed(dirh, pre_diroffset,
+ udf_fidsize(fid));
+ } else {
+ /* append to the dirhash */
+ dirhash_enter(dirh, dirent, pre_diroffset,
+ udf_fidsize(fid), 0);
+ }
+ }
+ dirh->flags |= DIRH_COMPLETE;
+
+ free(fid, M_UDFTEMP);
+ free(dirent, M_UDFTEMP);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Directory read and manipulation functions.
+ *
+ */
+
+int
+udf_lookup_name_in_dir(struct vnode *vp, const char *name, int namelen,
+ struct long_ad *icb_loc, int *found)
+{
+ struct udf_node *dir_node = VTOI(vp);
+ struct dirhash *dirh;
+ struct dirhash_entry *dirh_ep;
+ struct fileid_desc *fid;
+ struct dirent *dirent;
+ uint64_t diroffset;
+ uint32_t lb_size;
+ int hit, error;
+
+ /* set default return */
+ *found = 0;
+
+ /* get our dirhash and make sure it's read in */
+ dirhash_get(&dir_node->dir_hash);
+ error = dirhash_fill(dir_node);
+ if (error) {
+ dirhash_put(dir_node->dir_hash);
+ return (error);
+ }
+ dirh = dir_node->dir_hash;
+
+ /* allocate temporary space for fid */
+ lb_size = le32toh(dir_node->ump->logical_vol->lb_size);
+ fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
+ dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
+
+ /* search our dirhash hits */
+ memset(icb_loc, 0, sizeof(*icb_loc));
+ dirh_ep = NULL;
+ for (;;) {
+ hit = dirhash_lookup(dirh, name, namelen, &dirh_ep);
+ /* if no hit, abort the search */
+ if (!hit)
+ break;
+
+ /* check this hit */
+ diroffset = dirh_ep->offset;
+
+ /* transfer a new fid/dirent */
+ error = udf_read_fid_stream(vp, &diroffset, fid, dirent);
+ if (error)
+ break;
+
+ /* see if it's our entry */
+ KASSERT(dirent->d_namlen == namelen);
+ if (strncmp(dirent->d_name, name, namelen) == 0) {
+ *found = 1;
+ *icb_loc = fid->icb;
+ break;
+ }
+ }
+ free(fid, M_UDFTEMP);
+ free(dirent, M_UDFTEMP);
+
+ dirhash_put(dir_node->dir_hash);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_create_new_fe(struct udf_mount *ump, struct file_entry *fe, int file_type,
+ struct long_ad *node_icb, struct long_ad *parent_icb,
+ uint64_t parent_unique_id)
+{
+ struct timespec now;
+ struct icb_tag *icb;
+ struct filetimes_extattr_entry *ft_extattr;
+ uint64_t unique_id;
+ uint32_t fidsize, lb_num;
+ uint8_t *bpos;
+ int crclen, attrlen;
+
+ lb_num = le32toh(node_icb->loc.lb_num);
+ udf_inittag(ump, &fe->tag, TAGID_FENTRY, lb_num);
+ icb = &fe->icbtag;
+
+ /*
+ * Always use strategy type 4 unless on WORM which we don't support
+ * (yet). Fill in defaults and set for internal allocation of data.
+ */
+ icb->strat_type = htole16(4);
+ icb->max_num_entries = htole16(1);
+ icb->file_type = file_type; /* 8 bit */
+ icb->flags = htole16(UDF_ICB_INTERN_ALLOC);
+
+ fe->perm = htole32(0x7fff); /* all is allowed */
+ fe->link_cnt = htole16(0); /* explicit setting */
+
+ fe->ckpoint = htole32(1); /* user supplied file version */
+
+ vfs_timestamp(&now);
+ udf_timespec_to_timestamp(&now, &fe->atime);
+ udf_timespec_to_timestamp(&now, &fe->attrtime);
+ udf_timespec_to_timestamp(&now, &fe->mtime);
+
+ udf_set_regid(&fe->imp_id, IMPL_NAME);
+ udf_add_impl_regid(ump, &fe->imp_id);
+
+ unique_id = udf_advance_uniqueid(ump);
+ fe->unique_id = htole64(unique_id);
+ fe->l_ea = htole32(0);
+
+ /* create extended attribute to record our creation time */
+ attrlen = UDF_FILETIMES_ATTR_SIZE(1);
+ ft_extattr = malloc(attrlen, M_UDFTEMP, M_WAITOK);
+ memset(ft_extattr, 0, attrlen);
+ ft_extattr->hdr.type = htole32(UDF_FILETIMES_ATTR_NO);
+ ft_extattr->hdr.subtype = 1; /* [4/48.10.5] */
+ ft_extattr->hdr.a_l = htole32(UDF_FILETIMES_ATTR_SIZE(1));
+ ft_extattr->d_l = htole32(UDF_TIMESTAMP_SIZE); /* one item */
+ ft_extattr->existence = UDF_FILETIMES_FILE_CREATION;
+ udf_timespec_to_timestamp(&now, &ft_extattr->times[0]);
+
+ udf_extattr_insert_internal(ump, (union dscrptr *) fe,
+ (struct extattr_entry *) ft_extattr);
+ free(ft_extattr, M_UDFTEMP);
+
+ /* if it's a directory, create '..' */
+ bpos = (uint8_t *) fe->data + le32toh(fe->l_ea);
+ fidsize = 0;
+ if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
+ fidsize = udf_create_parentfid(ump,
+ (struct fileid_desc *) bpos, parent_icb,
+ parent_unique_id);
+ }
+
+ /* record fidlength information */
+ fe->inf_len = htole64(fidsize);
+ fe->l_ad = htole32(fidsize);
+ fe->logblks_rec = htole64(0); /* intern */
+
+ crclen = sizeof(struct file_entry) - 1 - UDF_DESC_TAG_LENGTH;
+ crclen += le32toh(fe->l_ea) + fidsize;
+ fe->tag.desc_crc_len = htole16(crclen);
+
+ (void) udf_validate_tag_and_crc_sums((union dscrptr *) fe);
+
+ return (fidsize);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_create_new_efe(struct udf_mount *ump, struct extfile_entry *efe,
+ int file_type, struct long_ad *node_icb, struct long_ad *parent_icb,
+ uint64_t parent_unique_id)
+{
+ struct timespec now;
+ struct icb_tag *icb;
+ uint64_t unique_id;
+ uint32_t fidsize, lb_num;
+ uint8_t *bpos;
+ int crclen;
+
+ lb_num = le32toh(node_icb->loc.lb_num);
+ udf_inittag(ump, &efe->tag, TAGID_EXTFENTRY, lb_num);
+ icb = &efe->icbtag;
+
+ /*
+ * Always use strategy type 4 unless on WORM which we don't support
+ * (yet). Fill in defaults and set for internal allocation of data.
+ */
+ icb->strat_type = htole16(4);
+ icb->max_num_entries = htole16(1);
+ icb->file_type = file_type; /* 8 bit */
+ icb->flags = htole16(UDF_ICB_INTERN_ALLOC);
+
+ efe->perm = htole32(0x7fff); /* all is allowed */
+ efe->link_cnt = htole16(0); /* explicit setting */
+
+ efe->ckpoint = htole32(1); /* user supplied file version */
+
+ vfs_timestamp(&now);
+ udf_timespec_to_timestamp(&now, &efe->ctime);
+ udf_timespec_to_timestamp(&now, &efe->atime);
+ udf_timespec_to_timestamp(&now, &efe->attrtime);
+ udf_timespec_to_timestamp(&now, &efe->mtime);
+
+ udf_set_regid(&efe->imp_id, IMPL_NAME);
+ udf_add_impl_regid(ump, &efe->imp_id);
+
+ unique_id = udf_advance_uniqueid(ump);
+ efe->unique_id = htole64(unique_id);
+ efe->l_ea = htole32(0);
+
+ /* if it's a directory, create '..' */
+ bpos = (uint8_t *) efe->data + le32toh(efe->l_ea);
+ fidsize = 0;
+ if (file_type == UDF_ICB_FILETYPE_DIRECTORY) {
+ fidsize = udf_create_parentfid(ump,
+ (struct fileid_desc *) bpos, parent_icb,
+ parent_unique_id);
+ }
+
+ /* record fidlength information */
+ efe->obj_size = htole64(fidsize);
+ efe->inf_len = htole64(fidsize);
+ efe->l_ad = htole32(fidsize);
+ efe->logblks_rec = htole64(0); /* intern */
+
+ crclen = sizeof(struct extfile_entry) - 1 - UDF_DESC_TAG_LENGTH;
+ crclen += le32toh(efe->l_ea) + fidsize;
+ efe->tag.desc_crc_len = htole16(crclen);
+
+ (void) udf_validate_tag_and_crc_sums((union dscrptr *) efe);
+
+ return (fidsize);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_dir_detach(struct udf_mount *ump, struct udf_node *dir_node,
+ struct udf_node *udf_node, struct componentname *cnp)
+{
+ struct vnode *dvp = dir_node->vnode;
+ struct dirhash *dirh;
+ struct dirhash_entry *dirh_ep;
+ struct file_entry *fe = dir_node->fe;
+ struct extfile_entry *efe = dir_node->efe;
+ struct fileid_desc *fid;
+ struct dirent *dirent;
+ uint64_t file_size, diroffset;
+ uint32_t lb_size, fidsize;
+ int found, error;
+ char const *name = cnp->cn_nameptr;
+ int namelen = cnp->cn_namelen;
+ int hit, refcnt;
+
+ /* get our dirhash and make sure it's read in */
+ dirhash_get(&dir_node->dir_hash);
+ error = dirhash_fill(dir_node);
+ if (error) {
+ dirhash_put(dir_node->dir_hash);
+ return (error);
+ }
+ dirh = dir_node->dir_hash;
+
+ /* get directory filesize */
+ if (fe) {
+ file_size = le64toh(fe->inf_len);
+ } else {
+ assert(efe);
+ file_size = le64toh(efe->inf_len);
+ }
+
+ /* allocate temporary space for fid */
+ lb_size = le32toh(dir_node->ump->logical_vol->lb_size);
+ fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
+ dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
+
+ /* search our dirhash hits */
+ found = 0;
+ dirh_ep = NULL;
+ for (;;) {
+ hit = dirhash_lookup(dirh, name, namelen, &dirh_ep);
+ /* if no hit, abort the search */
+ if (!hit)
+ break;
+
+ /* check this hit */
+ diroffset = dirh_ep->offset;
+
+ /* transfer a new fid/dirent */
+ error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
+ if (error)
+ break;
+
+ /* see if it's our entry */
+ KASSERT(dirent->d_namlen == namelen);
+ if (strncmp(dirent->d_name, name, namelen) == 0) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ error = ENOENT;
+ if (error)
+ goto error_out;
+
+ /* mark deleted */
+ fid->file_char |= UDF_FILE_CHAR_DEL;
+#ifdef UDF_COMPLETE_DELETE
+ memset(&fid->icb, 0, sizeof(fid->icb));
+#endif
+ (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
+
+ /* get size of fid and compensate for the read_fid_stream advance */
+ fidsize = udf_fidsize(fid);
+ diroffset -= fidsize;
+
+ /* write out */
+ error = vn_rdwr(UIO_WRITE, dir_node->vnode,
+ fid, fidsize, diroffset,
+ UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
+ FSCRED, NULL, NULL);
+ if (error)
+ goto error_out;
+
+ /* get reference count of attached node */
+ if (udf_node->fe) {
+ refcnt = le16toh(udf_node->fe->link_cnt);
+ } else {
+ KASSERT(udf_node->efe);
+ refcnt = le16toh(udf_node->efe->link_cnt);
+ }
+#ifdef UDF_COMPLETE_DELETE
+ /* subtract reference counter in attached node */
+ refcnt -= 1;
+ if (udf_node->fe) {
+ udf_node->fe->link_cnt = htole16(refcnt);
+ } else {
+ udf_node->efe->link_cnt = htole16(refcnt);
+ }
+
+ /* prevent writeout when refcnt == 0 */
+ if (refcnt == 0)
+ udf_node->i_flags |= IN_DELETED;
+
+ if (fid->file_char & UDF_FILE_CHAR_DIR) {
+ int drefcnt;
+
+ /* subtract reference counter in directory node */
+ /* note subtract 2 (?) since it was also backreferenced */
+ if (dir_node->fe) {
+ drefcnt = le16toh(dir_node->fe->link_cnt);
+ drefcnt -= 1;
+ dir_node->fe->link_cnt = htole16(drefcnt);
+ } else {
+ KASSERT(dir_node->efe);
+ drefcnt = le16toh(dir_node->efe->link_cnt);
+ drefcnt -= 1;
+ dir_node->efe->link_cnt = htole16(drefcnt);
+ }
+ }
+
+ udf_node->i_flags |= IN_MODIFIED;
+ dir_node->i_flags |= IN_MODIFIED;
+#endif
+ /* if it is/was a hardlink adjust the file count */
+ if (refcnt > 0)
+ udf_adjust_filecount(udf_node, -1);
+
+ /* remove from the dirhash */
+ dirhash_remove(dirh, dirent, diroffset,
+ udf_fidsize(fid));
+
+error_out:
+ free(fid, M_UDFTEMP);
+ free(dirent, M_UDFTEMP);
+
+ dirhash_put(dir_node->dir_hash);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_dir_update_rootentry(struct udf_mount *ump, struct udf_node *dir_node,
+ struct udf_node *new_parent_node)
+{
+ struct vnode *dvp = dir_node->vnode;
+ struct dirhash *dirh;
+ struct dirhash_entry *dirh_ep;
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct fileid_desc *fid;
+ struct dirent *dirent;
+ uint64_t file_size, diroffset;
+ uint64_t new_parent_unique_id;
+ uint32_t lb_size, fidsize;
+ int found, error;
+ char const *name = "..";
+ int namelen = 2;
+ int hit;
+
+ /* get our dirhash and make sure it's read in */
+ dirhash_get(&dir_node->dir_hash);
+ error = dirhash_fill(dir_node);
+ if (error) {
+ dirhash_put(dir_node->dir_hash);
+ return (error);
+ }
+ dirh = dir_node->dir_hash;
+
+ /* get new parent's unique ID */
+ fe = new_parent_node->fe;
+ efe = new_parent_node->efe;
+ if (fe) {
+ new_parent_unique_id = le64toh(fe->unique_id);
+ } else {
+ assert(efe);
+ new_parent_unique_id = le64toh(efe->unique_id);
+ }
+
+ /* get directory filesize */
+ fe = dir_node->fe;
+ efe = dir_node->efe;
+ if (fe) {
+ file_size = le64toh(fe->inf_len);
+ } else {
+ assert(efe);
+ file_size = le64toh(efe->inf_len);
+ }
+
+ /* allocate temporary space for fid */
+ lb_size = le32toh(dir_node->ump->logical_vol->lb_size);
+ fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
+ dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK);
+
+ /*
+ * NOTE the standard does not dictate the FID entry '..' should be
+ * first, though in practice it will most likely be.
+ */
+
+ /* search our dirhash hits */
+ found = 0;
+ dirh_ep = NULL;
+ for (;;) {
+ hit = dirhash_lookup(dirh, name, namelen, &dirh_ep);
+ /* if no hit, abort the search */
+ if (!hit)
+ break;
+
+ /* check this hit */
+ diroffset = dirh_ep->offset;
+
+ /* transfer a new fid/dirent */
+ error = udf_read_fid_stream(dvp, &diroffset, fid, dirent);
+ if (error)
+ break;
+
+ /* see if it's our entry */
+ KASSERT(dirent->d_namlen == namelen);
+ if (strncmp(dirent->d_name, name, namelen) == 0) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ error = ENOENT;
+ if (error)
+ goto error_out;
+
+ /* update our ICB to the new parent, keeping the lower 32 bits of its unique id */
+ fid->icb = new_parent_node->write_loc;
+ fid->icb.longad_uniqueid = htole32(new_parent_unique_id);
+
+ (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
+
+ /* get size of fid and compensate for the read_fid_stream advance */
+ fidsize = udf_fidsize(fid);
+ diroffset -= fidsize;
+
+ /* write out */
+ error = vn_rdwr(UIO_WRITE, dir_node->vnode,
+ fid, fidsize, diroffset,
+ UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
+ FSCRED, NULL, NULL);
+
+ /* nothing to be done in the dirhash */
+
+error_out:
+ free(fid, M_UDFTEMP);
+ free(dirent, M_UDFTEMP);
+
+ dirhash_put(dir_node->dir_hash);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * We are not allowed to split the fid tag itself over an logical block so
+ * check the space remaining in the logical block.
+ *
+ * We try to select the smallest candidate for recycling or when none is
+ * found, append a new one at the end of the directory.
+ */
+
+int
+udf_dir_attach(struct udf_mount *ump, struct udf_node *dir_node,
+ struct udf_node *udf_node, struct vattr *vap, struct componentname *cnp)
+{
+ struct vnode *dvp = dir_node->vnode;
+ struct dirhash *dirh;
+ struct dirhash_entry *dirh_ep;
+ struct fileid_desc *fid;
+ struct icb_tag *icbtag;
+ struct charspec osta_charspec;
+ struct dirent dirent;
+ uint64_t unique_id, dir_size;
+ uint64_t fid_pos, end_fid_pos, chosen_fid_pos;
+ uint32_t chosen_size, chosen_size_diff;
+ int lb_size, lb_rest, fidsize, this_fidsize, size_diff;
+ int file_char, refcnt, icbflags, addr_type, hit, error;
+
+ /* get our dirhash and make sure it's read in */
+ dirhash_get(&dir_node->dir_hash);
+ error = dirhash_fill(dir_node);
+ if (error) {
+ dirhash_put(dir_node->dir_hash);
+ return (error);
+ }
+ dirh = dir_node->dir_hash;
+
+ /* get info */
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ udf_osta_charset(&osta_charspec);
+
+ if (dir_node->fe) {
+ dir_size = le64toh(dir_node->fe->inf_len);
+ icbtag = &dir_node->fe->icbtag;
+ } else {
+ dir_size = le64toh(dir_node->efe->inf_len);
+ icbtag = &dir_node->efe->icbtag;
+ }
+
+ icbflags = le16toh(icbtag->flags);
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ if (udf_node->fe) {
+ unique_id = le64toh(udf_node->fe->unique_id);
+ refcnt = le16toh(udf_node->fe->link_cnt);
+ } else {
+ unique_id = le64toh(udf_node->efe->unique_id);
+ refcnt = le16toh(udf_node->efe->link_cnt);
+ }
+
+ if (refcnt > 0) {
+ unique_id = udf_advance_uniqueid(ump);
+ udf_adjust_filecount(udf_node, 1);
+ }
+
+ /* determine file characteristics */
+ file_char = 0; /* visible non deleted file and not stream metadata */
+ if (vap->va_type == VDIR)
+ file_char = UDF_FILE_CHAR_DIR;
+
+ /* malloc scrap buffer */
+ fid = malloc(lb_size, M_UDFTEMP, M_WAITOK|M_ZERO);
+
+ /* calculate _minimum_ fid size */
+ unix_to_udf_name((char *) fid->data, &fid->l_fi,
+ cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
+ fidsize = UDF_FID_SIZE + fid->l_fi;
+ fidsize = (fidsize + 3) & ~3; /* multiple of 4 */
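+ /*
+ * For example, assuming UDF_FID_SIZE is the 38-byte fixed part of a
+ * FID and l_fi includes the compression-id byte, a 3-character name
+ * yields 38 + 4 = 42 bytes, rounded up to 44 here.
+ */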
+
+ /* find position that will fit the FID */
+ chosen_fid_pos = dir_size;
+ chosen_size = 0;
+ chosen_size_diff = UINT_MAX;
+
+ /* shut up gcc */
+ dirent.d_namlen = 0;
+
+ /* search our dirhash hits */
+ error = 0;
+ dirh_ep = NULL;
+ for (;;) {
+ hit = dirhash_lookup_freed(dirh, fidsize, &dirh_ep);
+ /* if no hit, abort the search */
+ if (!hit)
+ break;
+
+ /* check this hit for size */
+ this_fidsize = dirh_ep->entry_size;
+
+ /* check this hit */
+ fid_pos = dirh_ep->offset;
+ end_fid_pos = fid_pos + this_fidsize;
+ size_diff = this_fidsize - fidsize;
+ lb_rest = lb_size - (end_fid_pos % lb_size);
+
+#ifndef UDF_COMPLETE_DELETE
+ /* transfer a new fid/dirent */
+ error = udf_read_fid_stream(dvp, &fid_pos, fid, &dirent);
+ if (error)
+ goto error_out;
+
+ /* only reuse entries that are wiped */
+ /* check if the len + loc are marked zero */
+ if (le32toh(fid->icb.len) != 0)
+ continue;
+ if (le32toh(fid->icb.loc.lb_num) != 0)
+ continue;
+ if (le16toh(fid->icb.loc.part_num) != 0)
+ continue;
+#endif /* UDF_COMPLETE_DELETE */
+
+ /* select if not splitting the tag and its smaller */
+ if ((size_diff >= 0) &&
+ (size_diff < chosen_size_diff) &&
+ (lb_rest >= sizeof(struct desc_tag)))
+ {
+ /* UDF 2.3.4.2+3 specifies rules for iu size */
+ if ((size_diff == 0) || (size_diff >= 32)) {
+ chosen_fid_pos = fid_pos;
+ chosen_size = this_fidsize;
+ chosen_size_diff = size_diff;
+ }
+ }
+ }
+
+
+ /* extend directory if no other candidate found */
+ if (chosen_size == 0) {
+ chosen_fid_pos = dir_size;
+ chosen_size = fidsize;
+ chosen_size_diff = 0;
+
+ /* special case UDF 2.00+ 2.3.4.4, no splitting up fid tag */
+ if (addr_type == UDF_ICB_INTERN_ALLOC) {
+ /* pre-grow directory to see if we're to switch */
+ udf_grow_node(dir_node, dir_size + chosen_size);
+
+ icbflags = le16toh(icbtag->flags);
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+ }
+
+ /* make sure the next fid desc_tag won't be split */
+ if (addr_type != UDF_ICB_INTERN_ALLOC) {
+ end_fid_pos = chosen_fid_pos + chosen_size;
+ lb_rest = lb_size - (end_fid_pos % lb_size);
+
+ /* pad with implementation use regid if needed */
+ if (lb_rest < sizeof(struct desc_tag))
+ chosen_size += 32;
+ }
+ }
+ chosen_size_diff = chosen_size - fidsize;
+
+ /* populate the FID */
+ memset(fid, 0, lb_size);
+ udf_inittag(ump, &fid->tag, TAGID_FID, 0);
+ fid->file_version_num = htole16(1); /* UDF 2.3.4.1 */
+ fid->file_char = file_char;
+ fid->icb = udf_node->loc;
+ fid->icb.longad_uniqueid = htole32((uint32_t) unique_id);
+ fid->l_iu = htole16(0);
+
+ if (chosen_size > fidsize) {
+ /* insert implementation-use regid to space it correctly */
+ fid->l_iu = htole16(chosen_size_diff);
+
+ /* set implementation use */
+ udf_set_regid((struct regid *) fid->data, IMPL_NAME);
+ udf_add_impl_regid(ump, (struct regid *) fid->data);
+ }
+
+ /* fill in name */
+ unix_to_udf_name((char *) fid->data + le16toh(fid->l_iu),
+ &fid->l_fi, cnp->cn_nameptr, cnp->cn_namelen, &osta_charspec);
+
+ fid->tag.desc_crc_len = htole16(chosen_size - UDF_DESC_TAG_LENGTH);
+ (void) udf_validate_tag_and_crc_sums((union dscrptr *) fid);
+
+ /* writeout FID/update parent directory */
+ error = vn_rdwr(UIO_WRITE, dvp,
+ fid, chosen_size, chosen_fid_pos,
+ UIO_SYSSPACE, IO_ALTSEMANTICS | IO_NODELOCKED,
+ FSCRED, NULL, NULL);
+
+ if (error)
+ goto error_out;
+
+ /* add reference counter in attached node */
+ if (udf_node->fe) {
+ refcnt = le16toh(udf_node->fe->link_cnt);
+ udf_node->fe->link_cnt = htole16(refcnt+1);
+ } else {
+ KASSERT(udf_node->efe);
+ refcnt = le16toh(udf_node->efe->link_cnt);
+ udf_node->efe->link_cnt = htole16(refcnt+1);
+ }
+
+ /* mark not deleted if it was... just in case, but do warn */
+ if (udf_node->i_flags & IN_DELETED) {
+ printf("udf: warning, marking a file undeleted\n");
+ udf_node->i_flags &= ~IN_DELETED;
+ }
+
+ if (file_char & UDF_FILE_CHAR_DIR) {
+ /* add reference counter in directory node for '..' */
+ if (dir_node->fe) {
+ refcnt = le16toh(dir_node->fe->link_cnt);
+ refcnt++;
+ dir_node->fe->link_cnt = htole16(refcnt);
+ } else {
+ KASSERT(dir_node->efe);
+ refcnt = le16toh(dir_node->efe->link_cnt);
+ refcnt++;
+ dir_node->efe->link_cnt = htole16(refcnt);
+ }
+ }
+
+ /* append to the dirhash */
+ dirent.d_namlen = cnp->cn_namelen;
+ memcpy(dirent.d_name, cnp->cn_nameptr, cnp->cn_namelen);
+ dirhash_enter(dirh, &dirent, chosen_fid_pos,
+ udf_fidsize(fid), 1);
+
+ /* note updates */
+ udf_node->i_flags |= IN_CHANGE | IN_MODIFY; /* | IN_CREATE? */
+ /* VN_KNOTE(udf_node, ...) */
+ udf_update(udf_node->vnode, NULL, NULL, NULL, 0);
+
+error_out:
+ free(fid, M_UDFTEMP);
+
+ dirhash_put(dir_node->dir_hash);
+
+ return (error);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Each node can have an attached streamdir node though not recursively. These
+ * are otherwise known as named substreams/named extended attributes that have
+ * no size limitations.
+ *
+ * `Normal' extended attributes are indicated with a number and are recorded
+ * in either the fe/efe descriptor itself for small descriptors or recorded in
+ * the attached extended attribute file. Since these spaces can get
+ * fragmented, care ought to be taken.
+ *
+ * Since the size of the space reserved for allocation descriptors is limited,
+ * there is a mechanism provided for extending this space; this is done by a
+ * special extent to allow shrinking of the allocations without breaking the
+ * linkage to the allocation extent descriptor.
+ */
+
+int
+udf_get_node(struct udf_mount *ump, ino_t ino, struct udf_node **ppunode)
+{
+ union dscrptr *dscr;
+ struct long_ad icb_loc, last_fe_icb_loc;
+ struct udf_node *udf_node;
+ uint64_t file_size;
+ int dscr_type, strat, strat4096, needs_indirect, slot, eof, error;
+ uint32_t lb_size, sector, dummy;
+ uint8_t *file_data;
+
+ DPRINTF(NODE, ("udf_get_node called\n"));
+
+ udf_get_node_longad(ino, &icb_loc);
+
+
+ /* garbage check: translate udf_node_icb_loc to sectornr */
+ error = udf_translate_vtop(ump, &icb_loc, &sector, &dummy);
+ if (error) {
+ return (EINVAL);
+ }
+
+ /* initialise crosslinks, note location of fe/efe for hashing */
+ udf_node = udf_alloc_node();
+ udf_node->hash_id = ino;
+ udf_node->ump = ump;
+ udf_node->loc = icb_loc;
+ udf_node->lockf = 0;
+/* mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE); */
+/* cv_init(&udf_node->node_lock, "udf_nlk"); */
+ udf_node->outstanding_bufs = 0;
+ udf_node->outstanding_nodedscr = 0;
+ udf_node->uncommitted_lbs = 0;
+
+ /* safe to unlock, the entry is in the hash table, vnode is locked */
+/* mutex_exit(&ump->get_node_lock); */
+
+ needs_indirect = 0;
+ strat4096 = 0;
+ file_size = 0;
+ file_data = NULL;
+ lb_size = le32toh(ump->logical_vol->lb_size);
+
+ do {
+ /* try to read in fe/efe */
+ /* error = udf_read_logvol_dscr(ump, &icb_loc, &dscr); */
+ error = udf_translate_vtop(ump, &icb_loc, &sector, &dummy);
+ if (!error)
+ error = udf_read_phys_dscr(ump, sector, M_UDFTEMP,
+ &dscr);
+
+ /* blank sector marks end of sequence, check this */
+ if ((dscr == NULL) && (!strat4096))
+ error = ENOENT;
+
+ /* break if read error or blank sector */
+ if (error || (dscr == NULL))
+ break;
+
+ /* process descriptor based on the descriptor type */
+ dscr_type = le16toh(dscr->tag.id);
+
+ /* if dealing with an indirect entry, follow the link */
+ if (dscr_type == TAGID_INDIRECTENTRY) {
+ needs_indirect = 0;
+ /* take the new location before freeing the descriptor */
+ icb_loc = dscr->inde.indirect_icb;
+ /* udf_free_logvol_dscr(ump, &icb_loc, dscr); */
+ free(dscr, M_UDFTEMP);
+ /* end of udf_free_logvol_decr replacement */
+ continue;
+ }
+
+ /* only file entries and extended file entries allowed here */
+ if ((dscr_type != TAGID_FENTRY) &&
+ (dscr_type != TAGID_EXTFENTRY)) {
+ /* udf_free_logvol_dscr(ump, &icb_loc, dscr); */
+ free(dscr, M_UDFTEMP);
+ /* end of udf_free_logvol_decr replacement */
+ error = ENOENT;
+ break;
+ }
+
+ /* KASSERT(udf_tagsize(dscr, lb_size) == lb_size); */
+
+ /* choose this one */
+ last_fe_icb_loc = icb_loc;
+
+ /* record and process/update (ext)fentry */
+ file_data = NULL;
+ if (dscr_type == TAGID_FENTRY) {
+ /*if (udf_node->fe)
+ udf_free_logvol_dscr(ump, &last_fe_icb_loc,
+ udf_node->fe); */
+ if (udf_node->fe)
+ free(udf_node->fe, M_UDFTEMP);
+ /* end of udf_free_logvol_decr replacement */
+ udf_node->fe = &dscr->fe;
+ strat = le16toh(udf_node->fe->icbtag.strat_type);
+ file_size = le64toh(udf_node->fe->inf_len);
+ file_data = udf_node->fe->data;
+ } else {
+ /*if (udf_node->efe)
+ udf_free_logvol_dscr(ump, &last_fe_icb_loc,
+ udf_node->efe); */
+ if (udf_node->efe)
+ free(udf_node->efe, M_UDFTEMP);
+ /* end of udf_free_logvol_decr replacement */
+ udf_node->efe = &dscr->efe;
+ strat = le16toh(udf_node->efe->icbtag.strat_type);
+ file_size = le64toh(udf_node->efe->inf_len);
+ file_data = udf_node->efe->data;
+ }
+
+ /* check recording strategy (structure) */
+
+ /*
+ * Strategy 4096 is a daisy linked chain terminating with an
+ * unrecorded sector or a TERM descriptor. The next
+ * descriptor is to be found in the sector that follows the
+ * current sector.
+ */
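+ /*
+ * Rough picture (illustrative only): lb N holds the current
+ * FE/EFE, lb N+1 holds the next entry, and so on, until a blank
+ * sector or a terminator ends the chain.
+ */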
+ if (strat == 4096) {
+ strat4096 = 1;
+ needs_indirect = 1;
+
+ icb_loc.loc.lb_num = le32toh(icb_loc.loc.lb_num) + 1;
+ }
+
+ /*
+ * Strategy 4 is the normal strategy and terminates, but if
+ * we're in strategy 4096, we can't have strategy 4 mixed in
+ */
+
+ if (strat == 4) {
+ if (strat4096) {
+ error = EINVAL;
+ break;
+ }
+ break; /* done */
+ }
+ } while (!error);
+
+ /* first round of cleanup code */
+ if (error) {
+ udf_dispose_node(udf_node);
+ return (EINVAL);
+ }
+
+ /* assert no references to dscr anymore beyond this point */
+ /* assert((udf_node->fe) || (udf_node->efe)); */
+ dscr = NULL;
+
+ /*
+ * Remember where to record an updated version of the descriptor. If
+ * there is a sequence of indirect entries, icb_loc will have been
+ * updated. It is the write discipline's job to allocate new space and
+ * to make sure the chain is maintained.
+ *
+ * `needs_indirect' flags if the next location is to be filled with an
+ * indirect entry.
+ */
+ udf_node->write_loc = icb_loc;
+ udf_node->needs_indirect = needs_indirect;
+
+ /*
+ * Go through all allocation extents of this descriptor and, when
+ * encountering a redirect, read in the allocation extension. These
+ * are daisy-chained.
+ */
+ UDF_LOCK_NODE(udf_node, 0);
+ udf_node->num_extensions = 0;
+
+ error = 0;
+ slot = 0;
+ for (;;) {
+ udf_get_adslot(udf_node, slot, &icb_loc, &eof);
+ if (eof)
+ break;
+ slot++;
+
+ if (UDF_EXT_FLAGS(le32toh(icb_loc.len)) != UDF_EXT_REDIRECT)
+ continue;
+
+ if (udf_node->num_extensions >= UDF_MAX_ALLOC_EXTENTS) {
+ error = EINVAL;
+ break;
+ }
+
+ /* length can only be *one* lb : UDF 2.50/2.3.7.1 */
+ if (UDF_EXT_LEN(le32toh(icb_loc.len)) != lb_size) {
+ error = EINVAL;
+ break;
+ }
+
+ /* load in allocation extent */
+ /* error = udf_read_logvol_dscr(ump, &icb_loc, &dscr); */
+ error = udf_translate_vtop(ump, &icb_loc, &sector, &dummy);
+ if (!error)
+ error = udf_read_phys_dscr(ump, sector, M_UDFTEMP,
+ &dscr);
+ if (error || (dscr == NULL))
+ break;
+
+ /* process read-in descriptor */
+ dscr_type = le16toh(dscr->tag.id);
+
+ if (dscr_type != TAGID_ALLOCEXTENT) {
+ /* udf_free_logvol_dscr(ump, &icb_loc, dscr); */
+ free(dscr, M_UDFTEMP);
+ /* end of udf_free_logvol_decr replacement */
+ error = ENOENT;
+ break;
+ }
+
+ udf_node->ext[udf_node->num_extensions] = &dscr->aee;
+ udf_node->ext_loc[udf_node->num_extensions] = icb_loc;
+
+ udf_node->num_extensions++;
+
+ } /* for */
+ UDF_UNLOCK_NODE(udf_node, 0);
+
+ /* second round of cleanup code */
+ if (error) {
+ /* recycle udf_node */
+ udf_dispose_node(udf_node);
+ return (EINVAL); /* error code ok? */
+ }
+
+ /* TODO ext attr and streamdir udf_nodes */
+
+ *ppunode = udf_node;
+
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+#if 0
+int
+udf_writeout_node(struct udf_node *udf_node, int waitfor)
+{
+ union dscrptr *dscr;
+ struct long_ad *loc;
+ int extnr, error;
+
+ DPRINTF(NODE, ("udf_writeout_node called\n"));
+
+ KASSERT(udf_node->outstanding_bufs == 0);
+ KASSERT(udf_node->outstanding_nodedscr == 0);
+
+ KASSERT(LIST_EMPTY(&udf_node->vnode->v_dirtyblkhd));
+
+ if (udf_node->i_flags & IN_DELETED) {
+ DPRINTF(NODE, ("\tnode deleted; not writing out\n"));
+ udf_cleanup_reservation(udf_node);
+ return (0);
+ }
+
+ /* lock node; unlocked in callback */
+ UDF_LOCK_NODE(udf_node, 0);
+
+ /* remove pending reservations, we're written out */
+ udf_cleanup_reservation(udf_node);
+
+ /* at least one descriptor writeout */
+ udf_node->outstanding_nodedscr = 1;
+
+ /* we're going to write out the descriptor so clear the flags */
+ udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED);
+
+ /* if we were rebuilt, write out the allocation extents */
+ if (udf_node->i_flags & IN_NODE_REBUILD) {
+ /* mark outstanding node descriptors and issue them */
+ udf_node->outstanding_nodedscr += udf_node->num_extensions;
+ for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
+ loc = &udf_node->ext_loc[extnr];
+ dscr = (union dscrptr *) udf_node->ext[extnr];
+ error = udf_write_logvol_dscr(udf_node, dscr, loc, 0);
+ if (error)
+ return (error);
+ }
+ /* mark allocation extents written out */
+ udf_node->i_flags &= ~(IN_NODE_REBUILD);
+ }
+
+ if (udf_node->fe) {
+ KASSERT(udf_node->efe == NULL);
+ dscr = (union dscrptr *) udf_node->fe;
+ } else {
+ KASSERT(udf_node->efe);
+ KASSERT(udf_node->fe == NULL);
+ dscr = (union dscrptr *) udf_node->efe;
+ }
+ KASSERT(dscr);
+
+ loc = &udf_node->write_loc;
+ error = udf_write_logvol_dscr(udf_node, dscr, loc, waitfor);
+
+ return (error);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_dispose_node(struct udf_node *udf_node)
+{
+ //struct vnode *vp;
+ int extnr;
+
+ DPRINTF(NODE, ("udf_dispose_node called on node %p\n", udf_node));
+ if (!udf_node) {
+ DPRINTF(NODE, ("UDF: Dispose node on node NULL, ignoring\n"));
+ return (0);
+ }
+#if 0
+ vp = udf_node->vnode;
+#ifdef DIAGNOSTIC
+ if (vp->v_numoutput)
+ panic("disposing UDF node with pending I/O's, udf_node = %p, "
+ "v_numoutput = %d", udf_node, vp->v_numoutput);
+#endif
+ udf_cleanup_reservation(udf_node);
+
+ /* TODO extended attributes and streamdir */
+
+ /* remove dirhash if present */
+ dirhash_purge(&udf_node->dir_hash);
+
+ /* remove from our hash lookup table */
+ udf_deregister_node(udf_node);
+
+ /* destroy our lock */
+ mutex_destroy(&udf_node->node_mutex);
+ cv_destroy(&udf_node->node_lock);
+
+ /* dissociate our udf_node from the vnode */
+ genfs_node_destroy(udf_node->vnode);
+ vp->v_data = NULL;
+#endif
+
+ /* free associated memory and the node itself */
+ for (extnr = 0; extnr < udf_node->num_extensions; extnr++) {
+ /*udf_free_logvol_dscr(udf_node->ump, &udf_node->ext_loc[extnr],
+ udf_node->ext[extnr]); */
+ free(udf_node->ext[extnr], M_UDFTEMP);
+ /*end of udf_free_logvol_decr replacement */
+ udf_node->ext[extnr] = (void *) 0xdeadcccc;
+ }
+
+ /*if (udf_node->fe)
+ udf_free_logvol_dscr(udf_node->ump, &udf_node->loc,
+ udf_node->fe); */
+ if (udf_node->fe)
+ free(udf_node->fe, M_UDFTEMP);
+ /*end of udf_free_logvol_decr replacement */
+
+ /*if (udf_node->efe)
+ udf_free_logvol_dscr(udf_node->ump, &udf_node->loc,
+ udf_node->efe); */
+ if (udf_node->efe)
+ free(udf_node->efe, M_UDFTEMP);
+ /*end of udf_free_logvol_decr replacement */
+
+ udf_node->fe = (void *) 0xdeadaaaa;
+ udf_node->efe = (void *) 0xdeadbbbb;
+ udf_node->ump = (void *) 0xdeadbeef;
+ udf_free_node(udf_node);
+
+ return (0);
+}
+
+
+/*
+ * create a new node using the specified vnodeops, vap and cnp but with the
+ * udf_file_type. This allows special files to be created. Use with care.
+ */
+
+#if 0
+static int
+udf_create_node_raw(struct vnode *dvp, struct vnode **vpp, int udf_file_type,
+ int (**vnodeops)(void *), struct vattr *vap, struct componentname *cnp)
+{
+ union dscrptr *dscr;
+ struct udf_node *dir_node = VTOI(dvp);
+ struct udf_node *udf_node;
+ struct udf_mount *ump = dir_node->ump;
+ struct vnode *nvp;
+ struct long_ad node_icb_loc;
+ uint64_t parent_unique_id;
+ uint64_t lmapping;
+ uint32_t lb_size, lb_num;
+ uint16_t vpart_num;
+ uid_t uid;
+ gid_t gid, parent_gid;
+ int fid_size, error;
+
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ *vpp = NULL;
+
+ /* allocate vnode */
+ error = getnewvnode(VT_UDF, ump->vfs_mountp, vnodeops, &nvp);
+ if (error)
+ return (error);
+
+ /* lock node */
+ error = vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY);
+ if (error)
+ goto error_out_unget;
+
+ /* reserve space for one logical block */
+ vpart_num = ump->node_part;
+ error = udf_reserve_space(ump, NULL, UDF_C_NODE,
+ vpart_num, 1, /* can_fail */ true);
+ if (error)
+ goto error_out_unlock;
+
+ /* allocate node */
+ error = udf_allocate_space(ump, NULL, UDF_C_NODE,
+ vpart_num, 1, &lmapping);
+ if (error)
+ goto error_out_unreserve;
+ lb_num = lmapping;
+
+ /* initialise pointer to location */
+ memset(&node_icb_loc, 0, sizeof(struct long_ad));
+ node_icb_loc.len = htole32(lb_size);
+ node_icb_loc.loc.lb_num = htole32(lb_num);
+ node_icb_loc.loc.part_num = htole16(vpart_num);
+
+ /* build udf_node (do initialise!) */
+ udf_node = pool_get(&udf_node_pool, PR_WAITOK);
+ memset(udf_node, 0, sizeof(struct udf_node));
+
+ /* initialise crosslinks, note location of fe/efe for hashing */
+ /* bugalert: synchronise with udf_get_node() */
+ udf_node->ump = ump;
+ udf_node->vnode = nvp;
+ nvp->v_data = udf_node;
+ udf_node->loc = node_icb_loc;
+ udf_node->write_loc = node_icb_loc;
+ udf_node->lockf = 0;
+ mutex_init(&udf_node->node_mutex, MUTEX_DEFAULT, IPL_NONE);
+ cv_init(&udf_node->node_lock, "udf_nlk");
+ udf_node->outstanding_bufs = 0;
+ udf_node->outstanding_nodedscr = 0;
+ udf_node->uncommitted_lbs = 0;
+
+ /* initialise genfs */
+ genfs_node_init(nvp, &udf_genfsops);
+
+ /* insert into the hash lookup */
+ udf_register_node(udf_node);
+
+ /* get parent's unique ID for referring to '..' if it is a directory */
+ if (dir_node->fe) {
+ parent_unique_id = le64toh(dir_node->fe->unique_id);
+ parent_gid = (gid_t) le32toh(dir_node->fe->gid);
+ } else {
+ parent_unique_id = le64toh(dir_node->efe->unique_id);
+ parent_gid = (gid_t) le32toh(dir_node->efe->gid);
+ }
+
+ /* get descriptor */
+ udf_create_logvol_dscr(ump, udf_node, &node_icb_loc, &dscr);
+
+ /* choose a fe or an efe for it */
+ if (le16toh(ump->logical_vol->tag.descriptor_ver) == 2) {
+ udf_node->fe = &dscr->fe;
+ fid_size = udf_create_new_fe(ump, udf_node->fe,
+ udf_file_type, &udf_node->loc,
+ &dir_node->loc, parent_unique_id);
+ /* TODO add extended attribute for creation time */
+ } else {
+ udf_node->efe = &dscr->efe;
+ fid_size = udf_create_new_efe(ump, udf_node->efe,
+ udf_file_type, &udf_node->loc,
+ &dir_node->loc, parent_unique_id);
+ }
+ KASSERT(dscr->tag.tag_loc == udf_node->loc.loc.lb_num);
+
+ /* update vnode's size and type */
+ nvp->v_type = vap->va_type;
+ uvm_vnp_setsize(nvp, fid_size);
+
+ /* set access mode */
+ udf_setaccessmode(udf_node, vap->va_mode);
+
+ /* set ownership */
+ uid = kauth_cred_geteuid(cnp->cn_cred);
+ gid = parent_gid;
+ udf_setownership(udf_node, uid, gid);
+
+ error = udf_dir_attach(ump, dir_node, udf_node, vap, cnp);
+ if (error) {
+ /* free disc allocation for node */
+ udf_free_allocated_space(ump, lb_num, vpart_num, 1);
+
+ /* recycle udf_node */
+ udf_dispose_node(udf_node);
+ vput(nvp);
+
+ *vpp = NULL;
+ return (error);
+ }
+
+ /* adjust file count */
+ udf_adjust_filecount(udf_node, 1);
+
+ /* return result */
+ *vpp = nvp;
+
+ return (0);
+
+error_out_unreserve:
+ udf_do_unreserve_space(ump, NULL, vpart_num, 1);
+
+error_out_unlock:
+ VOP_UNLOCK(nvp);
+
+error_out_unget:
+ nvp->v_data = NULL;
+ ungetnewvnode(nvp);
+
+ return (error);
+}
+
+
+int
+udf_create_node(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
+ struct componentname *cnp)
+{
+ int (**vnodeops)(void *);
+ int udf_file_type;
+
+ /* what type are we creating ? */
+ vnodeops = udf_vnodeop_p;
+ /* start with a default */
+ udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;
+
+ *vpp = NULL;
+
+ switch (vap->va_type) {
+ case VREG :
+ udf_file_type = UDF_ICB_FILETYPE_RANDOMACCESS;
+ break;
+ case VDIR :
+ udf_file_type = UDF_ICB_FILETYPE_DIRECTORY;
+ break;
+ case VLNK :
+ udf_file_type = UDF_ICB_FILETYPE_SYMLINK;
+ break;
+ case VBLK :
+ udf_file_type = UDF_ICB_FILETYPE_BLOCKDEVICE;
+ /* specfs */
+ return (ENOTSUP);
+ break;
+ case VCHR :
+ udf_file_type = UDF_ICB_FILETYPE_CHARDEVICE;
+ /* specfs */
+ return (ENOTSUP);
+ break;
+ case VFIFO :
+ udf_file_type = UDF_ICB_FILETYPE_FIFO;
+ /* specfs */
+ return (ENOTSUP);
+ break;
+ case VSOCK :
+ udf_file_type = UDF_ICB_FILETYPE_SOCKET;
+ /* specfs */
+ return (ENOTSUP);
+ break;
+ case VNON :
+ case VBAD :
+ default :
+ /* nothing; can we even create these? */
+ return (EINVAL);
+ }
+
+ return (udf_create_node_raw(dvp, vpp, udf_file_type, vnodeops, vap, cnp));
+}
+
+/* --------------------------------------------------------------------- */
+
+static void
+udf_free_descriptor_space(struct udf_node *udf_node, struct long_ad *loc, void *mem)
+{
+ struct udf_mount *ump = udf_node->ump;
+ uint32_t lb_size, lb_num, len, num_lb;
+ uint16_t vpart_num;
+
+ /* is there really one? */
+ if (mem == NULL)
+ return;
+
+ /* got a descriptor here */
+ len = UDF_EXT_LEN(le32toh(loc->len));
+ lb_num = le32toh(loc->loc.lb_num);
+ vpart_num = le16toh(loc->loc.part_num);
+
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ num_lb = (len + lb_size -1) / lb_size;
+
+ udf_free_allocated_space(ump, lb_num, vpart_num, num_lb);
+}
+
+void
+udf_delete_node(struct udf_node *udf_node)
+{
+ void *dscr;
+ struct udf_mount *ump;
+ struct long_ad *loc;
+ int extnr, lvint, dummy;
+
+ ump = udf_node->ump;
+
+ /* paranoia check on integrity; should be open!; we could panic */
+ lvint = le32toh(udf_node->ump->logvol_integrity->integrity_type);
+ if (lvint == UDF_INTEGRITY_CLOSED)
+ printf("\tIntegrity was CLOSED!\n");
+
+ /* whatever the node type, change its size to zero */
+ (void) udf_resize_node(udf_node, 0, &dummy);
+
+ /* force it to be `clean'; no use writing it out */
+ udf_node->i_flags &= ~(IN_MODIFIED | IN_ACCESSED | IN_ACCESS |
+ IN_CHANGE | IN_UPDATE | IN_MODIFY);
+
+ /* adjust file count */
+ udf_adjust_filecount(udf_node, -1);
+
+ /*
+ * Free its allocated descriptors; memory will be released when
+ * vop_reclaim() is called.
+ */
+ loc = &udf_node->loc;
+
+ dscr = udf_node->fe;
+ udf_free_descriptor_space(udf_node, loc, dscr);
+ dscr = udf_node->efe;
+ udf_free_descriptor_space(udf_node, loc, dscr);
+
+ for (extnr = 0; extnr < UDF_MAX_ALLOC_EXTENTS; extnr++) {
+ dscr = udf_node->ext[extnr];
+ loc = &udf_node->ext_loc[extnr];
+ udf_free_descriptor_space(udf_node, loc, dscr);
+ }
+}
+
+/* --------------------------------------------------------------------- */
+
+/* set new filesize; node must be LOCKED on entry and is locked on exit */
+int
+udf_resize_node(struct udf_node *udf_node, uint64_t new_size, int *extended)
+{
+ struct file_entry *fe = udf_node->fe;
+ struct extfile_entry *efe = udf_node->efe;
+ uint64_t file_size;
+ int error;
+
+ if (fe) {
+ file_size = le64toh(fe->inf_len);
+ } else {
+ assert(udf_node->efe);
+ file_size = le64toh(efe->inf_len);
+ }
+
+ /* if not changing, we're done */
+ if (file_size == new_size)
+ return (0);
+
+ *extended = (new_size > file_size);
+ if (*extended) {
+ error = udf_grow_node(udf_node, new_size);
+ } else {
+ error = udf_shrink_node(udf_node, new_size);
+ }
+
+ return (error);
+}
+
+
+/* --------------------------------------------------------------------- */
+
+void
+udf_itimes(struct udf_node *udf_node, struct timespec *acc,
+ struct timespec *mod, struct timespec *birth)
+{
+ struct timespec now;
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct filetimes_extattr_entry *ft_extattr;
+ struct timestamp *atime, *mtime, *attrtime, *ctime;
+ struct timestamp fe_ctime;
+ struct timespec cur_birth;
+ uint32_t offset, a_l;
+ uint8_t *filedata;
+ int error;
+
+ /* protect against rogue values */
+ if (!udf_node)
+ return;
+
+ fe = udf_node->fe;
+ efe = udf_node->efe;
+
+ if (!(udf_node->i_flags & (IN_ACCESS|IN_CHANGE|IN_UPDATE|IN_MODIFY)))
+ return;
+
+ /* get descriptor information */
+ if (fe) {
+ atime = &fe->atime;
+ mtime = &fe->mtime;
+ attrtime = &fe->attrtime;
+ filedata = fe->data;
+
+ /* initial safe dummy setting */
+ ctime = &fe_ctime;
+
+ /* check our extended attribute if present */
+ error = udf_extattr_search_intern(udf_node,
+ UDF_FILETIMES_ATTR_NO, "", &offset, &a_l);
+ if (!error) {
+ ft_extattr = (struct filetimes_extattr_entry *)
+ (filedata + offset);
+ if (ft_extattr->existence & UDF_FILETIMES_FILE_CREATION)
+ ctime = &ft_extattr->times[0];
+ }
+ /* TODO create the extended attribute if not found ? */
+ } else {
+ assert(udf_node->efe);
+ atime = &efe->atime;
+ mtime = &efe->mtime;
+ attrtime = &efe->attrtime;
+ ctime = &efe->ctime;
+ }
+
+ vfs_timestamp(&now);
+
+ /* set access time */
+ if (udf_node->i_flags & IN_ACCESS) {
+ if (acc == NULL)
+ acc = &now;
+ udf_timespec_to_timestamp(acc, atime);
+ }
+
+ /* set modification time */
+ if (udf_node->i_flags & (IN_UPDATE | IN_MODIFY)) {
+ if (mod == NULL)
+ mod = &now;
+ udf_timespec_to_timestamp(mod, mtime);
+
+ /* ensure birthtime is older than set modification! */
+ udf_timestamp_to_timespec(udf_node->ump, ctime, &cur_birth);
+ if ((cur_birth.tv_sec > mod->tv_sec) ||
+ ((cur_birth.tv_sec == mod->tv_sec) &&
+ (cur_birth.tv_nsec > mod->tv_nsec))) {
+ udf_timespec_to_timestamp(mod, ctime);
+ }
+ }
+
+ /* update birthtime if specified */
+ /* XXX we assume here that the given birthtime is older than mod */
+ if (birth && (birth->tv_sec != VNOVAL)) {
+ udf_timespec_to_timestamp(birth, ctime);
+ }
+
+ /* set change time */
+ if (udf_node->i_flags & (IN_CHANGE | IN_MODIFY))
+ udf_timespec_to_timestamp(&now, attrtime);
+
+ /* notify updates to the node itself */
+ if (udf_node->i_flags & (IN_ACCESS | IN_MODIFY))
+ udf_node->i_flags |= IN_ACCESSED;
+ if (udf_node->i_flags & (IN_UPDATE | IN_CHANGE))
+ udf_node->i_flags |= IN_MODIFIED;
+
+ /* clear modification flags */
+ udf_node->i_flags &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_update(struct vnode *vp, struct timespec *acc,
+ struct timespec *mod, struct timespec *birth, int updflags)
+{
+ union dscrptr *dscrptr;
+ struct udf_node *udf_node = VTOI(vp);
+ struct udf_mount *ump = udf_node->ump;
+ struct regid *impl_id;
+ int mnt_async = (vp->v_mount->mnt_flag & MNT_ASYNC);
+ int waitfor, flags;
+
+#ifdef DEBUG
+ char bits[128];
+ DPRINTF(CALL, ("udf_update(node, %p, %p, %p, %d)\n", acc, mod, birth,
+ updflags));
+ snprintb(bits, sizeof(bits), IN_FLAGBITS, udf_node->i_flags);
+ DPRINTF(CALL, ("\tnode flags %s\n", bits));
+ DPRINTF(CALL, ("\t\tmnt_async = %d\n", mnt_async));
+#endif
+
+ /* set our times */
+ udf_itimes(udf_node, acc, mod, birth);
+
+ /* set our implementation id */
+ if (udf_node->fe) {
+ dscrptr = (union dscrptr *) udf_node->fe;
+ impl_id = &udf_node->fe->imp_id;
+ } else {
+ dscrptr = (union dscrptr *) udf_node->efe;
+ impl_id = &udf_node->efe->imp_id;
+ }
+
+ /* set our ID */
+ udf_set_regid(impl_id, IMPL_NAME);
+ udf_add_impl_regid(ump, impl_id);
+
+ /* update our crc! on RMW we are not allowed to change a thing */
+ udf_validate_tag_and_crc_sums(dscrptr);
+
+ /* if called when mounted readonly, never write back */
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return (0);
+
+ /* check if the node is dirty 'enough'*/
+ if (updflags & UPDATE_CLOSE) {
+ flags = udf_node->i_flags & (IN_MODIFIED | IN_ACCESSED);
+ } else {
+ flags = udf_node->i_flags & IN_MODIFIED;
+ }
+ if (flags == 0)
+ return (0);
+
+ /* determine if we need to write sync or async */
+ waitfor = 0;
+ if ((flags & IN_MODIFIED) && (mnt_async == 0)) {
+ /* sync mounted */
+ waitfor = updflags & UPDATE_WAIT;
+ if (updflags & UPDATE_DIROP)
+ waitfor |= UPDATE_WAIT;
+ }
+ if (waitfor)
+ return (VOP_FSYNC(vp, FSCRED, FSYNC_WAIT, 0,0));
+
+ return (0);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+
+/*
+ * Read one fid and advance to the next; (*fid) has to be allocated one
+ * logical block in size.
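+ *
+ * A typical caller (sketch only, not taken from this change; it assumes fid
+ * points to one logical block of scratch space) walks the directory stream
+ * roughly as:
+ *
+ * offset = 0;
+ * while (offset < file_size)
+ * if ((error = udf_read_fid_stream(vp, &offset, fid)) != 0)
+ * break;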
+ */
+
+int
+udf_read_fid_stream(struct vnode *vp, uint64_t *offset,
+ struct fileid_desc *fid)
+{
+ struct udf_node *dir_node = VTOI(vp);
+ struct udf_mount *ump = dir_node->ump;
+ struct file_entry *fe = dir_node->fe;
+ struct extfile_entry *efe = dir_node->efe;
+ uint64_t file_size;
+ int enough, error;
+ uint32_t fid_size, lb_size;
+
+ KASSERT(fid, ("udf_read_fid_stream: File Identifier Descriptor is null"));
+ /* KASSERT(dirent, ("dirent == null")); */
+ KASSERT(dir_node, ("udf_read_fid_stream: udf_node is null"));
+ KASSERT(offset, ("udf_read_fid_stream: offset == 0"));
+ KASSERT(*offset != 1, ("udf_read_fid_stream: offset != 1"));
+
+ DPRINTF(FIDS, ("read_fid_stream called at offset %ju\n",
+ (uintmax_t)*offset));
+
+ /* check if we're past the end of the directory */
+ if (fe) {
+ file_size = le64toh(fe->inf_len);
+ } else {
+ /* assert(dir_node->efe); */
+ file_size = le64toh(efe->inf_len);
+ }
+ if (*offset >= file_size)
+ return (EINVAL);
+
+ /* get maximum length of FID descriptor */
+ lb_size = le32toh(ump->logical_vol->lb_size);
+
+ /* initialise return values */
+ fid_size = 0;
+ memset(fid, 0, lb_size);
+
+ enough = (file_size - (*offset) >= UDF_FID_SIZE);
+ if (!enough) {
+ /* short dir ... */
+ return (EIO);
+ }
+
+ error = udf_read_node(dir_node, (uint8_t *)fid, *offset,
+ MIN(file_size - (*offset), lb_size));
+ /*error = vn_rdwr(UIO_READ, vp,
+ fid, MIN(file_size - (*offset), lb_size), *offset,
+ UIO_SYSSPACE, IO_NODELOCKED, FSCRED, 0, // IO_ALTSEMANTICS |
+ NULL, curthread);*/
+ if (error)
+ return (error);
+
+ DPRINTF(FIDS, ("\tfid piece read in fine\n"));
+ /*
+ * Check if we got a whole descriptor.
+ * TODO Try to `resync' directory stream when something is very wrong.
+ */
+
+ /* check if our FID header is OK */
+ error = udf_check_tag(fid);
+ if (error) {
+ goto brokendir;
+ }
+ DPRINTF(FIDS, ("\ttag check ok\n"));
+
+ if (le16toh(fid->tag.id) != TAGID_FID) {
+ error = EIO;
+ goto brokendir;
+ }
+ DPRINTF(FIDS, ("\ttag checked ok: got TAGID_FID\n"));
+
+ /* check for length */
+ fid_size = udf_fidsize(fid);
+ enough = (file_size - (*offset) >= fid_size);
+ if (!enough) {
+ error = EIO;
+ goto brokendir;
+ }
+ DPRINTF(FIDS, ("\tthe complete fid is read in\n"));
+
+ /* check FID contents */
+ error = udf_check_tag_payload((union dscrptr *) fid, lb_size);
+brokendir:
+ if (error) {
+ /* note that this is sometimes a bit quick to report */
+ printf("UDF: BROKEN DIRECTORY ENTRY\n");
+ /* RESYNC? */
+ /* TODO: use udf_resync_fid_stream */
+ return (EIO);
+ }
+ DPRINTF(FIDS, ("\tpayload checked ok\n"));
+
+ /* we got a whole and valid descriptor! */
+ DPRINTF(FIDS, ("\tinterpret FID\n"));
+
+ /* advance */
+ *offset += fid_size;
+
+ return (error);
+}
+
+
+/* --------------------------------------------------------------------- */
+#if 0
+static void
+udf_sync_pass(struct udf_mount *ump, kauth_cred_t cred, int waitfor,
+ int pass, int *ndirty)
+{
+ struct udf_node *udf_node, *n_udf_node;
+ struct vnode *vp;
+ int vdirty, error;
+ int on_type, on_flags, on_vnode;
+
+derailed:
+ KASSERT(mutex_owned(&mntvnode_lock));
+
+ DPRINTF(SYNC, ("sync_pass %d\n", pass));
+ udf_node = RBTOUDFNODE(RB_TREE_MIN(&ump->udf_node_tree));
+ for (;udf_node; udf_node = n_udf_node) {
+ DPRINTF(SYNC, ("."));
+
+ udf_node->i_flags &= ~IN_SYNCED;
+ vp = udf_node->vnode;
+
+ mutex_enter(&vp->v_interlock);
+ n_udf_node = RBTOUDFNODE(rb_tree_iterate(
+ &ump->udf_node_tree, &udf_node->rbnode,
+ RB_DIR_RIGHT));
+
+ if (n_udf_node)
+ n_udf_node->i_flags |= IN_SYNCED;
+
+ /* system nodes are not synced this way */
+ if (vp->v_vflag & VV_SYSTEM) {
+ mutex_exit(&vp->v_interlock);
+ continue;
+ }
+
+ /* check if its dirty enough to even try */
+ on_type = (waitfor == MNT_LAZY || vp->v_type == VNON);
+ on_flags = ((udf_node->i_flags &
+ (IN_ACCESSED | IN_UPDATE | IN_MODIFIED)) == 0);
+ on_vnode = LIST_EMPTY(&vp->v_dirtyblkhd)
+ && UVM_OBJ_IS_CLEAN(&vp->v_uobj);
+ if (on_type || (on_flags || on_vnode)) { /* XXX */
+ /* not dirty (enough?) */
+ mutex_exit(&vp->v_interlock);
+ continue;
+ }
+
+ mutex_exit(&mntvnode_lock);
+ error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT);
+ if (error) {
+ mutex_enter(&mntvnode_lock);
+ if (error == ENOENT)
+ goto derailed;
+ *ndirty += 1;
+ continue;
+ }
+
+ switch (pass) {
+ case 1:
+ VOP_FSYNC(vp, cred, 0 | FSYNC_DATAONLY,0,0);
+ break;
+ case 2:
+ vdirty = vp->v_numoutput;
+ if (vp->v_tag == VT_UDF)
+ vdirty += udf_node->outstanding_bufs +
+ udf_node->outstanding_nodedscr;
+ if (vdirty == 0)
+ VOP_FSYNC(vp, cred, 0,0,0);
+ *ndirty += vdirty;
+ break;
+ case 3:
+ vdirty = vp->v_numoutput;
+ if (vp->v_tag == VT_UDF)
+ vdirty += udf_node->outstanding_bufs +
+ udf_node->outstanding_nodedscr;
+ *ndirty += vdirty;
+ break;
+ }
+
+ vput(vp);
+ mutex_enter(&mntvnode_lock);
+ }
+ DPRINTF(SYNC, ("END sync_pass %d\n", pass));
+}
+
+
+void
+udf_do_sync(struct udf_mount *ump, kauth_cred_t cred, int waitfor)
+{
+ int dummy, ndirty;
+
+ mutex_enter(&mntvnode_lock);
+recount:
+ dummy = 0;
+ DPRINTF(CALL, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
+ DPRINTF(SYNC, ("issue VOP_FSYNC(DATA only) on all nodes\n"));
+ udf_sync_pass(ump, cred, waitfor, 1, &dummy);
+
+ DPRINTF(CALL, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
+ DPRINTF(SYNC, ("issue VOP_FSYNC(COMPLETE) on all finished nodes\n"));
+ udf_sync_pass(ump, cred, waitfor, 2, &dummy);
+
+ if (waitfor == MNT_WAIT) {
+ ndirty = ump->devvp->v_numoutput;
+ DPRINTF(SYNC, ("counting pending blocks: on devvp %d\n",
+ ndirty));
+ udf_sync_pass(ump, cred, waitfor, 3, &ndirty);
+ DPRINTF(SYNC, ("counted num dirty pending blocks %d\n",
+ ndirty));
+
+ if (ndirty) {
+ /* 1/4 second wait */
+ cv_timedwait(&ump->dirtynodes_cv, &mntvnode_lock,
+ hz/4);
+ goto recount;
+ }
+ }
+
+ mutex_exit(&mntvnode_lock);
+}
+
+/* --------------------------------------------------------------------- */
+
+#endif
+/*
+ * Read and write file extent in/from the buffer.
+ *
+ * The splitup of the extent into separate request-buffers is to minimise
+ * copying around as much as possible.
+ *
+ * block based file reading and writing
+ */
+
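+/*
+ * For internally (embedded) allocated files the data lives inside the FE/EFE
+ * descriptor itself, right after the extended attribute area; a sketch of the
+ * layout assumed by udf_read_internal() and udf_write_internal() below:
+ *
+ * pos = &fe->data[0] + le32toh(fe->l_ea); start of embedded data
+ * memcpy(blob, pos, inflen); with inflen = le64toh(fe->inf_len) < sector_size
+ */
+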
+int
+udf_read_internal(struct udf_node *node, uint8_t *blob)
+{
+ struct file_entry *fe = node->fe;
+ struct extfile_entry *efe = node->efe;
+ struct udf_mount *ump;
+ uint64_t inflen;
+ int icbflags, addr_type;
+ uint32_t sector_size;
+ uint8_t *pos;
+
+ /* get extent and do some paranoia checks */
+ ump = node->ump;
+ sector_size = ump->discinfo.sector_size;
+
+ if (fe) {
+ inflen = le64toh(fe->inf_len);
+ pos = &fe->data[0] + le32toh(fe->l_ea);
+ icbflags = le16toh(fe->icbtag.flags);
+ } else {
+ /*assert(node->efe); */
+ inflen = le64toh(efe->inf_len);
+ pos = &efe->data[0] + le32toh(efe->l_ea);
+ icbflags = le16toh(efe->icbtag.flags);
+ }
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ /*assert(addr_type == UDF_ICB_INTERN_ALLOC); */
+ /*assert(inflen < sector_size); */
+
+ /* copy out info */
+ memset(blob, 0, sector_size);
+ memcpy(blob, pos, inflen);
+
+ return (0);
+}
+
+
+#if 0
+static int
+udf_write_internal(struct udf_node *node, uint8_t *blob)
+{
+ struct udf_mount *ump;
+ struct file_entry *fe = node->fe;
+ struct extfile_entry *efe = node->efe;
+ uint64_t inflen;
+ uint32_t sector_size;
+ uint8_t *pos;
+ int icbflags, addr_type;
+
+ /* get extent and do some paranoia checks */
+ ump = node->ump;
+ sector_size = ump->discinfo.sector_size;
+
+ if (fe) {
+ inflen = le64toh(fe->inf_len);
+ pos = &fe->data[0] + le32toh(fe->l_ea);
+ icbflags = le16toh(fe->icbtag.flags);
+ } else {
+ assert(node->efe);
+ inflen = le64toh(efe->inf_len);
+ pos = &efe->data[0] + le32toh(efe->l_ea);
+ icbflags = le16toh(efe->icbtag.flags);
+ }
+ addr_type = icbflags & UDF_ICB_TAG_FLAGS_ALLOC_MASK;
+
+ assert(addr_type == UDF_ICB_INTERN_ALLOC);
+ assert(inflen < sector_size);
+
+ /* copy in blob */
+ /* memset(pos, 0, inflen); */
+ memcpy(pos, blob, inflen);
+
+ return (0);
+}
+
+
+void
+udf_read_filebuf(struct udf_node *udf_node, struct buf *buf)
+{
+ struct buf *nestbuf;
+ struct udf_mount *ump = udf_node->ump;
+ uint64_t *mapping;
+ uint64_t run_start;
+ uint32_t sector_size;
+ uint32_t buf_offset, sector, rbuflen, rblk;
+ uint32_t from, lblkno;
+ uint32_t sectors;
+ uint8_t *buf_pos;
+ int error, run_length, what;
+
+ sector_size = udf_node->ump->discinfo.sector_size;
+
+ from = buf->b_blkno;
+ sectors = buf->b_bcount / sector_size;
+
+ what = udf_get_c_type(udf_node);
+
+ /* assure we have enough translation slots */
+ KASSERT(buf->b_bcount / sector_size <= UDF_MAX_MAPPINGS);
+ KASSERT(MAXPHYS / sector_size <= UDF_MAX_MAPPINGS);
+
+ if (sectors > UDF_MAX_MAPPINGS) {
+ printf("udf_read_filebuf: implementation limit on bufsize\n");
+ buf->b_error = EIO;
+ biodone(buf);
+ return;
+ }
+
+ mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_UDFTEMP, M_WAITOK);
+
+ error = 0;
+ DPRINTF(READ, ("\ttranslate %d-%d\n", from, sectors));
+ error = udf_translate_file_extent(udf_node, from, sectors, mapping);
+ if (error) {
+ buf->b_error = error;
+ biodone(buf);
+ goto out;
+ }
+ DPRINTF(READ, ("\ttranslate extent went OK\n"));
+
+ /* pre-check if it's an internal allocation */
+ if (*mapping == UDF_TRANS_INTERN) {
+ error = udf_read_internal(udf_node, (uint8_t *) buf->b_data);
+ if (error)
+ buf->b_error = error;
+ biodone(buf);
+ goto out;
+ }
+ DPRINTF(READ, ("\tnot intern\n"));
+
+#ifdef DEBUG
+ if (udf_verbose & UDF_DEBUG_TRANSLATE) {
+ printf("Returned translation table:\n");
+ for (sector = 0; sector < sectors; sector++) {
+ printf("%d : %"PRIu64"\n", sector, mapping[sector]);
+ }
+ }
+#endif
+
+ /* request read-in of data from disc scheduler */
+ buf->b_resid = buf->b_bcount;
+ for (sector = 0; sector < sectors; sector++) {
+ buf_offset = sector * sector_size;
+ buf_pos = (uint8_t *) buf->b_data + buf_offset;
+ DPRINTF(READ, ("\tprocessing rel sector %d\n", sector));
+
+ /* check if it's zero or unmapped to stop reading */
+ switch (mapping[sector]) {
+ case UDF_TRANS_UNMAPPED:
+ case UDF_TRANS_ZERO:
+ /* copy zero sector TODO runlength like below */
+ memset(buf_pos, 0, sector_size);
+ DPRINTF(READ, ("\treturning zero sector\n"));
+ nestiobuf_done(buf, sector_size, 0);
+ break;
+ default :
+ DPRINTF(READ, ("\tread sector "
+ "%"PRIu64"\n", mapping[sector]));
+
+ lblkno = from + sector;
+ run_start = mapping[sector];
+ run_length = 1;
+ while (sector < sectors-1) {
+ if (mapping[sector+1] != mapping[sector]+1)
+ break;
+ run_length++;
+ sector++;
+ }
+
+ /*
+ * nest an iobuf and mark it for async reading. Since
+ * we're using nested buffers, they can't be cached by
+ * design.
+ */
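+ /*
+ * Worked example (assuming the usual DEV_BSIZE of 512): with a
+ * sector_size of 2048 and run_length 4, rbuflen becomes 8192 and
+ * rblk addresses the run start in 512-byte units, run_start * 4.
+ */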
+ rbuflen = run_length * sector_size;
+ rblk = run_start * (sector_size/DEV_BSIZE);
+
+ nestbuf = getiobuf(NULL, true);
+ nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
+ /* nestbuf is B_ASYNC */
+
+ /* identify this nestbuf */
+ nestbuf->b_lblkno = lblkno;
+ assert(nestbuf->b_vp == udf_node->vnode);
+
+ /* CD schedules on raw blkno */
+ nestbuf->b_blkno = rblk;
+ nestbuf->b_proc = NULL;
+ nestbuf->b_rawblkno = rblk;
+ nestbuf->b_udf_c_type = what;
+
+ udf_discstrat_queuebuf(ump, nestbuf);
+ }
+ }
+out:
+ /* if we're synchronously reading, wait for the completion */
+ if ((buf->b_flags & B_ASYNC) == 0)
+ biowait(buf);
+
+ DPRINTF(READ, ("\tend of read_filebuf\n"));
+ free(mapping, M_UDFTEMP);
+ return;
+}
+
+
+void
+udf_write_filebuf(struct udf_node *udf_node, struct buf *buf)
+{
+ struct buf *nestbuf;
+ struct udf_mount *ump = udf_node->ump;
+ uint64_t *mapping;
+ uint64_t run_start;
+ uint32_t lb_size;
+ uint32_t buf_offset, lb_num, rbuflen, rblk;
+ uint32_t from, lblkno;
+ uint32_t num_lb;
+ int error, run_length, what, s;
+
+ lb_size = le32toh(udf_node->ump->logical_vol->lb_size);
+
+ from = buf->b_blkno;
+ num_lb = buf->b_bcount / lb_size;
+
+ what = udf_get_c_type(udf_node);
+
+ /* assure we have enough translation slots */
+ KASSERT(buf->b_bcount / lb_size <= UDF_MAX_MAPPINGS);
+ KASSERT(MAXPHYS / lb_size <= UDF_MAX_MAPPINGS);
+
+ if (num_lb > UDF_MAX_MAPPINGS) {
+ printf("udf_write_filebuf: implementation limit on bufsize\n");
+ buf->b_error = EIO;
+ biodone(buf);
+ return;
+ }
+
+ mapping = malloc(sizeof(*mapping) * UDF_MAX_MAPPINGS, M_UDFTEMP, M_WAITOK);
+
+ error = 0;
+ DPRINTF(WRITE, ("\ttranslate %d-%d\n", from, num_lb));
+ error = udf_translate_file_extent(udf_node, from, num_lb, mapping);
+ if (error) {
+ buf->b_error = error;
+ biodone(buf);
+ goto out;
+ }
+ DPRINTF(WRITE, ("\ttranslate extent went OK\n"));
+
+ /* if it's internally mapped, we can write it in the descriptor itself */
+ if (*mapping == UDF_TRANS_INTERN) {
+ /* TODO paranoia check if we ARE going to have enough space */
+ error = udf_write_internal(udf_node, (uint8_t *) buf->b_data);
+ if (error)
+ buf->b_error = error;
+ biodone(buf);
+ goto out;
+ }
+ DPRINTF(WRITE, ("\tnot intern\n"));
+
+ /* request write out of data to disc scheduler */
+ buf->b_resid = buf->b_bcount;
+ for (lb_num = 0; lb_num < num_lb; lb_num++) {
+ buf_offset = lb_num * lb_size;
+ DPRINTF(WRITE, ("\tprocessing rel lb_num %d\n", lb_num));
+
+ /*
+ * Mappings are not that important here. Just before we write
+ * the lb_num we late-allocate them when needed and update the
+ * mapping in the udf_node.
+ */
+
+ /* XXX why not ignore the mapping altogether ? */
+ DPRINTF(WRITE, ("\twrite lb_num "
+ "%"PRIu64, mapping[lb_num]));
+
+ lblkno = from + lb_num;
+ run_start = mapping[lb_num];
+ run_length = 1;
+ while (lb_num < num_lb-1) {
+ if (mapping[lb_num+1] != mapping[lb_num]+1)
+ if (mapping[lb_num+1] != mapping[lb_num])
+ break;
+ run_length++;
+ lb_num++;
+ }
+ DPRINTF(WRITE, ("+ %d\n", run_length));
+
+ /* nest an iobuf on the master buffer for the extent */
+ rbuflen = run_length * lb_size;
+ rblk = run_start * (lb_size/DEV_BSIZE);
+
+ nestbuf = getiobuf(NULL, true);
+ nestiobuf_setup(buf, nestbuf, buf_offset, rbuflen);
+ /* nestbuf is B_ASYNC */
+
+ /* identify this nestbuf */
+ nestbuf->b_lblkno = lblkno;
+ KASSERT(nestbuf->b_vp == udf_node->vnode);
+
+ /* CD schedules on raw blkno */
+ nestbuf->b_blkno = rblk;
+ nestbuf->b_proc = NULL;
+ nestbuf->b_rawblkno = rblk;
+ nestbuf->b_udf_c_type = what;
+
+ /* increment our outstanding bufs counter */
+ s = splbio();
+ udf_node->outstanding_bufs++;
+ splx(s);
+
+ udf_discstrat_queuebuf(ump, nestbuf);
+ }
+out:
+ /* if we're synchronously writing, wait for the completion */
+ if ((buf->b_flags & B_ASYNC) == 0)
+ biowait(buf);
+
+ DPRINTF(WRITE, ("\tend of write_filebuf\n"));
+ free(mapping, M_UDFTEMP);
+ return;
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+
Index: sys/fs/udf2/udf_vfsops.c
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf_vfsops.c
@@ -0,0 +1,987 @@
+/*-
+ * Copyright (c) 2012 Oleksandr Dudinskyi
+ * Copyright (c) 2012 Will DeVries
+ * Copyright (c) 2006, 2008 Reinoud Zandijk
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/endian.h>
+#include <sys/cdefs.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/systm.h>
+#include <sys/fcntl.h>
+#include <sys/namei.h>
+#include <sys/proc.h>
+#include <sys/vnode.h>
+#include <sys/mount.h>
+#include <sys/conf.h>
+#include <sys/module.h>
+#include <sys/priv.h>
+#include <sys/iconv.h>
+#include <geom/geom.h>
+#include <geom/geom_vfs.h>
+
+#include "ecma167-udf.h"
+#include "udf.h"
+#include "udf_subr.h"
+#include "udf_mount.h"
+
+MALLOC_DEFINE(M_UDFTEMP, "UDF temp", "UDF allocation space");
+uma_zone_t udf_zone_node = NULL;
+
+/* verbose levels of the udf filesystem */
+int udf_verbose = UDF_DEBUGGING;
+
+struct iconv_functions *udf2_iconv = NULL;
+
+/* internal functions */
+static int udf_mountfs(struct vnode *, struct mount *);
+
+/* --------------------------------------------------------------------- */
+
+/* predeclare vfs operation functions */
+
+static vfs_init_t udf_init;
+static vfs_mount_t udf_mount;
+static vfs_root_t udf_root;
+static vfs_statfs_t udf_statfs;
+static vfs_uninit_t udf_uninit;
+static vfs_unmount_t udf_unmount;
+static vfs_fhtovp_t udf_fhtovp;
+
+static struct vfsops udf_vfsops = {
+ .vfs_init = udf_init,
+ .vfs_uninit = udf_uninit,
+ .vfs_mount = udf_mount,
+ .vfs_root = udf_root,
+ .vfs_statfs = udf_statfs,
+ .vfs_unmount = udf_unmount,
+ .vfs_fhtovp = udf_fhtovp,
+ .vfs_vget = udf_vget
+};
+VFS_SET(udf_vfsops, udf2, VFCF_READONLY);
+
+MODULE_VERSION(udf2, 1);
+
+/* --------------------------------------------------------------------- */
+
+/* file system starts here */
+static int
+udf_init(struct vfsconf *notused)
+{
+ /* init node pools */
+ udf_zone_node = uma_zcreate("UDF Node Pool Zone",
+ sizeof(struct udf_node), NULL, NULL, NULL, NULL, 0, 0);
+
+ if (udf_zone_node == NULL) {
+ printf("Cannot create node pool zone.");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+static int
+udf_uninit(struct vfsconf *notused)
+{
+ /* remove pools */
+ if (udf_zone_node != NULL) {
+ uma_zdestroy(udf_zone_node);
+ udf_zone_node = NULL;
+ }
+
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+/* if the system nodes exist, release them */
+static void
+udf_release_system_nodes(struct mount *mp)
+{
+ struct udf_mount *ump = VFSTOUDF(mp);
+ int error;
+
+ /* if we haven't even got an ump, don't bother */
+ if (!ump)
+ return;
+
+ /* VAT partition support */
+ if (ump->vat_node)
+ udf_dispose_node(ump->vat_node);
+
+ /* Metadata partition support */
+ if (ump->metadata_node)
+ udf_dispose_node(ump->metadata_node);
+ if (ump->metadatamirror_node)
+ udf_dispose_node(ump->metadatamirror_node);
+ if (ump->metadatabitmap_node)
+ udf_dispose_node(ump->metadatabitmap_node);
+
+ /* This flush should NOT write anything nor allow any node to remain */
+ if ((error = vflush(ump->vfs_mountp, 0, 0, curthread)))
+ panic("Failure to flush UDF system vnodes\n");
+}
+
+/* --------------------------------------------------------------------- */
+
+#define MPFREE(a, lst) do { \
+ if ((a)) \
+ free((a), lst); \
+} while (0)
+static void
+free_udf_mountinfo(struct mount *mp)
+{
+ struct udf_mount *ump;
+ int i;
+
+ if (!mp)
+ return;
+
+ ump = VFSTOUDF(mp);
+ if (ump) {
+ /* clear our data */
+ for (i = 0; i < UDF_ANCHORS; i++)
+ MPFREE(ump->anchors[i], M_UDFTEMP);
+ MPFREE(ump->primary_vol, M_UDFTEMP);
+ MPFREE(ump->logical_vol, M_UDFTEMP);
+ MPFREE(ump->unallocated, M_UDFTEMP);
+ MPFREE(ump->implementation, M_UDFTEMP);
+ MPFREE(ump->logvol_integrity, M_UDFTEMP);
+ for (i = 0; i < UDF_PARTITIONS; i++) {
+ MPFREE(ump->partitions[i], M_UDFTEMP);
+ MPFREE(ump->part_unalloc_dscr[i], M_UDFTEMP);
+ MPFREE(ump->part_freed_dscr[i], M_UDFTEMP);
+ }
+ MPFREE(ump->metadata_unalloc_dscr, M_UDFTEMP);
+
+ MPFREE(ump->fileset_desc, M_UDFTEMP);
+ MPFREE(ump->sparing_table, M_UDFTEMP);
+
+#if 0
+ MPFREE(ump->la_node_ad_cpy, M_UDFTEMP);
+ MPFREE(ump->la_pmapping, M_UDFTEMP);
+ MPFREE(ump->la_lmapping, M_UDFTEMP);
+
+ mutex_destroy(&ump->ihash_lock);
+ mutex_destroy(&ump->get_node_lock);
+ mutex_destroy(&ump->logvol_mutex);
+ mutex_destroy(&ump->allocate_mutex);
+ cv_destroy(&ump->dirtynodes_cv);
+#endif
+
+ MPFREE(ump->vat_table, M_UDFTEMP);
+
+ free(ump, M_UDFTEMP);
+ }
+}
+#undef MPFREE
+
+/* --------------------------------------------------------------------- */
+
+
+static int
+udf_mount(struct mount *mp)
+{
+ struct thread *td;
+ struct vnode *devvp;
+ struct nameidata nd;
+ struct vfsoptlist *opts;
+ int error, len;
+ char *fspec;
+
+ td = curthread;
+ opts = mp->mnt_optnew;
+ DPRINTF(CALL, ("udf_mount called\n"));
+
+ /*
+ * Unconditionally mount as read-only.
+ */
+ MNT_ILOCK(mp);
+ mp->mnt_flag |= MNT_RDONLY;
+ MNT_IUNLOCK(mp);
+
+ /*
+ * No root filesystem support. Probably not a big deal, since the
+ * bootloader doesn't understand UDF.
+ */
+ if (mp->mnt_flag & MNT_ROOTFS)
+ return (ENOTSUP);
+
+ fspec = NULL;
+ error = vfs_getopt(opts, "from", (void **)&fspec, &len);
+ if (!error && fspec[len - 1] != '\0')
+ return (EINVAL);
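+
+ /*
+ * Note (illustrative only): the request arrives via nmount(2); the
+ * "from" option carries the device path looked up below, while the
+ * "flags" and "udf_args" options are consumed later in udf_mountfs().
+ */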
+
+ /* handle request for updating mount parameters */
+ /* TODO can't update my mountpoint yet */
+ if (mp->mnt_flag & MNT_UPDATE)
+ return (0);
+
+ /* Check that the mount device exists */
+ if (fspec == NULL)
+ return (EINVAL);
+
+ /* lookup name to get its vnode */
+ NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
+ if ((error = namei(&nd)))
+ return (error);
+ NDFREE(&nd, NDF_ONLY_PNBUF);
+ devvp = nd.ni_vp;
+
+ if (vn_isdisk(devvp, &error) == 0) {
+ vput(devvp);
+ return (error);
+ }
+
+ /* Check the access rights on the mount device */
+ error = VOP_ACCESS(devvp, VREAD, td->td_ucred, td);
+ if (error)
+ error = priv_check(td, PRIV_VFS_MOUNT_PERM);
+ if (error) {
+ vput(devvp);
+ return (error);
+ }
+
+
+ /*
+ * Open device and try to mount it!
+ */
+ if ((error = udf_mountfs(devvp, mp))) {
+ vrele(devvp);
+ return (error);
+ }
+
+ /* successfully mounted */
+ DPRINTF(VOLUMES, ("udf_mount() successful\n"));
+#if 0
+ /* If we're not opened read-only, open its logical volume */
+ if ((mp->mnt_flag & MNT_RDONLY) == 0) {
+ if ((error = udf_open_logvol(VFSTOUDF(mp))) != 0) {
+ printf( "mount_udf: can't open logical volume for "
+ "writing, downgrading access to read-only\n");
+ mp->mnt_flag |= MNT_RDONLY;
+ /* FIXME we can't return error now on open failure */
+ return 0;
+ }
+ }
+#endif
+
+ /* TODO: Add some iconv code here. */
+
+ vfs_mountedfrom(mp, fspec);
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+#ifdef DEBUG
+static void
+udf_unmount_sanity_check(struct mount *mp)
+{
+ struct vnode *vp;
+
+ printf("On unmount, i found the following nodes:\n");
+ TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
+ vprint("", vp);
+ if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) {
+ printf(" is locked\n");
+ }
+ if (vp->v_usecount > 1)
+ printf(" more than one usecount %d\n", vp->v_usecount);
+ }
+}
+#endif
+
+
+int
+udf_unmount(struct mount *mp, int mntflags)
+{
+ struct udf_mount *ump;
+ int error, flags = 0;
+
+ DPRINTF(CALL, ("udf_unmount called\n"));
+
+ ump = VFSTOUDF(mp);
+ if (!ump)
+ panic("UDF unmount: empty ump\n");
+
+ flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
+
+ /* TODO remove these paranoid functions */
+#ifdef DEBUG
+ if (udf_verbose & UDF_DEBUG_LOCKING)
+ udf_unmount_sanity_check(mp);
+#endif
+
+ /*
+ * By specifying SKIPSYSTEM we can skip vnodes marked with VV_SYSTEM.
+ * This hardly documented feature allows us to exempt certain files
+ * from being flushed.
+ */
+ if ((error = vflush(mp, 0, flags, curthread)))
+ return (error);
+
+ /* update nodes and wait for completion of writeout of system nodes */
+ //udf_sync(mp, FSYNC_WAIT, NOCRED);
+
+#ifdef DEBUG
+ if (udf_verbose & UDF_DEBUG_LOCKING)
+ udf_unmount_sanity_check(mp);
+#endif
+#if 0
+ /* flush again, to check if we are still busy for something else */
+ if ((error = vflush(ump->vfs_mountp, 0, flags, curthread)))
+ return (error);
+
+ DPRINTF(VOLUMES, ("flush OK on unmount\n"));
+
+ /* close logical volume and close session if requested */
+ if ((error = udf_close_logvol(ump, mntflags)) != 0)
+ return (error);
+#endif
+
+#ifdef DEBUG
+ DPRINTF(VOLUMES, ("FINAL sanity check\n"));
+ if (udf_verbose & UDF_DEBUG_LOCKING)
+ udf_unmount_sanity_check(mp);
+#endif
+
+ /* NOTE release system nodes should NOT write anything */
+ udf_release_system_nodes(mp);
+
+#if 0
+ /* finalise disc strategy */
+ udf_discstrat_finish(ump);
+
+#endif
+ /* synchronise device caches */
+ (void) udf_synchronise_caches(ump);
+
+/* TODO: clean up iconv here */
+ if (ump->iconv_d2l)
+ udf2_iconv->close(ump->iconv_d2l);
+
+ DROP_GIANT();
+ g_topology_lock();
+ g_vfs_close(ump->geomcp);
+ g_topology_unlock();
+ PICKUP_GIANT();
+ vrele(ump->devvp);
+ dev_rel(ump->dev);
+
+ /* free our ump */
+ free_udf_mountinfo(mp);
+
+ /* free ump struct references */
+ mp->mnt_data = NULL;
+ MNT_ILOCK(mp);
+ mp->mnt_flag &= ~MNT_LOCAL;
+ MNT_IUNLOCK(mp);
+
+ DPRINTF(VOLUMES, ("Fin unmount\n"));
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Helper function of udf_mount() that actually mounts the disc.
+ */
+
+static int
+udf_mountfs(struct vnode *devvp, struct mount *mp)
+{
+ struct g_consumer *cp = NULL;
+ struct cdev *dev;
+ struct udf_mount *ump = NULL;
+ struct vfsoptlist *opts;
+ int num_anchors, error, len, *udf_flags;
+ uint32_t bshift, logvol_integrity, sector_size; /*lb_size,*/
+ char *cs_disk, *cs_local;
+ void *optdata;
+
+#if 0
+ /* flush out any old buffers remaining from a previous use. */
+ if ((error = vinvalbuf(devvp, V_SAVE, l->l_cred, l, 0, 0)))
+ return (error);
+#endif
+ opts = mp->mnt_optnew;
+
+ /* Open a consumer. */
+ dev = devvp->v_rdev;
+ dev_ref(dev);
+ DROP_GIANT();
+ g_topology_lock();
+ error = g_vfs_open(devvp, &cp, "udf2", 0);
+ g_topology_unlock();
+ PICKUP_GIANT();
+ VOP_UNLOCK(devvp, 0);
+ if (error)
+ goto fail;
+
+
+ /* setup basic mount information */
+ mp->mnt_data = NULL;
+ mp->mnt_stat.f_fsid.val[0] = dev2udev(devvp->v_rdev);
+ mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
+ mp->mnt_stat.f_namemax = UDF_MAX_NAMELEN;
+ if (devvp->v_rdev->si_iosize_max != 0)
+ mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
+ if (mp->mnt_iosize_max > MAXPHYS)
+ mp->mnt_iosize_max = MAXPHYS;
+ MNT_ILOCK(mp);
+ mp->mnt_flag |= MNT_LOCAL;
+ mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
+ MNT_IUNLOCK(mp);
+
+ /* allocate udf part of mount structure; malloc always succeeds */
+ ump = malloc(sizeof(struct udf_mount), M_UDFTEMP, M_WAITOK | M_ZERO);
+
+#if 0
+ /* init locks */
+ mutex_init(&ump->logvol_mutex, MUTEX_DEFAULT, IPL_NONE);
+ mutex_init(&ump->ihash_lock, MUTEX_DEFAULT, IPL_NONE);
+ mutex_init(&ump->get_node_lock, MUTEX_DEFAULT, IPL_NONE);
+ mutex_init(&ump->allocate_mutex, MUTEX_DEFAULT, IPL_NONE);
+ cv_init(&ump->dirtynodes_cv, "udfsync2");
+
+ /* init rbtree for nodes, ordered by their icb address (long_ad) */
+ udf_init_nodes_tree(ump);
+#endif
+
+ /* set up linkage */
+ mp->mnt_data = ump;
+ ump->vfs_mountp = mp;
+ ump->devvp = devvp;
+ ump->dev = dev;
+ ump->geomcp = cp;
+ ump->bo = &devvp->v_bufobj;
+
+ /* Load flags for later. Not sure what to use them for... */
+ udf_flags = NULL;
+ error = vfs_getopt(opts, "flags", (void **)&udf_flags, &len);
+ if (error || len != sizeof(int)) {
+ error = EINVAL;
+ goto fail;
+ }
+ ump->flags = *udf_flags;
+
+ optdata = NULL;
+ error = vfs_getopt(opts, "udf_args", &optdata, &len);
+ if (error || len != sizeof(struct udf_args)) {
+ error = EINVAL;
+ goto fail;
+ }
+
+ /* set up arguments and device */
+ ump->mount_args = *(struct udf_args *)optdata;
+
+ if (ump->flags & UDFMNT_KICONV && udf2_iconv) {
+ cs_disk = "UTF-16BE";
+
+ cs_local = NULL;
+ error = vfs_getopt(opts, "cs_local", (void **)&cs_local, &len);
+ if (error != 0 || cs_local[len-1] != '\0') {
+ error = EINVAL;
+ goto fail;
+ }
+
+ udf2_iconv->open(cs_local, cs_disk, &ump->iconv_d2l);
+#if 0
+ udf2_iconv->open(cs_disk, cs_local, &ump->iconv_l2d);
+#endif
+ }
+
+ if ((error = udf_update_discinfo(ump, cp->provider->sectorsize, cp->provider->mediasize))) {
+ printf("UDF mount: error inspecting fs node\n");
+ goto fail;
+ }
+
+ /* inspect sector size */
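+ /*
+ * e.g. a 2048 byte sector gives bshift 11 (1 << 11 == 2048); a size
+ * that is not a power of two never matches and is rejected below.
+ */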
+ sector_size = ump->discinfo.sector_size;
+ bshift = 1;
+ while ((1 << bshift) < sector_size)
+ bshift++;
+ if ((1 << bshift) != sector_size) {
+ printf("UDF mount: "
+ "hit implementation fence on sector size\n");
+ error = EIO;
+ goto fail;
+
+ }
+
+ /* temporary check to overcome sectorsize >= 8192 bytes panic */
+ if (sector_size >= 8192) {
+ printf("UDF mount: "
+ "hit implementation limit, sectorsize to big\n");
+ error = EIO;
+ goto fail;
+
+ }
+
+ /*
+ * Inspect if we're asked to mount read-write on a non-recordable or
+ * closed sequential disc.
+ */
+ if ((mp->mnt_flag & MNT_RDONLY) == 0) {
+ if ((ump->discinfo.mmc_cur & MMC_CAP_RECORDABLE) == 0) {
+ printf("UDF mount: disc is not recordable\n");
+ error = EROFS;
+ goto fail;
+ }
+ if (ump->discinfo.mmc_cur & MMC_CAP_SEQUENTIAL) {
+ if (ump->discinfo.disc_state == MMC_STATE_FULL) {
+ printf("UDF mount: disc is not appendable\n");
+ error = EROFS;
+ goto fail;
+ }
+
+ /*
+ * TODO if the last session is closed check if there
+ * is enough space to open/close new session
+ */
+ }
+ /* double check if we're not mounting a previous session RW */
+
+ if (ump->mount_args.sessionnr != 0) {
+ printf("UDF mount: updating a previous session "
+ "not yet allowed\n");
+ error = EROFS;
+ goto fail;
+ }
+
+ }
+
+#if 0
+ /* initialise bootstrap disc strategy */
+ ump->strategy = &udf_strat_bootstrap;
+ udf_discstrat_init(ump);
+#endif
+
+ /* read all anchors to get volume descriptor sequence */
+ num_anchors = udf_read_anchors(ump);
+ if (num_anchors == 0) {
+ printf("UDF mount: error reading anchors\n");
+ error = EINVAL;
+ goto fail;
+ }
+
+ /*DPRINTF(VOLUMES, ("Read %d anchors on this disc, session %d\n",
+ num_anchors, args->sessionnr));*/
+
+ /* read in volume descriptor sequence */
+ if ((error = udf_read_vds_space(ump))) {
+ printf("UDF mount: error reading volume space\n");
+ goto fail;
+ }
+
+#if 0
+ /* close down bootstrap disc strategy */
+ udf_discstrat_finish(ump);
+#endif
+
+ /* check consistency and completeness */
+ if ((error = udf_process_vds(ump))) {
+ printf( "UDF mount: disc not properly formatted(bad VDS)\n");
+ goto fail;
+ }
+
+#if 0
+ /* switch to new disc strategy */
+ KASSERT(ump->strategy != &udf_strat_bootstrap,
+ ("ump->strategy != &udf_strat_bootstrap"));
+ udf_discstrat_init(ump);
+
+ /* initialise late allocation administration space */
+ ump->la_lmapping = malloc(sizeof(uint64_t) * UDF_MAX_MAPPINGS,
+ M_UDFTEMP, M_WAITOK);
+ ump->la_pmapping = malloc(sizeof(uint64_t) * UDF_MAX_MAPPINGS,
+ M_UDFTEMP, M_WAITOK);
+
+ /* setup node cleanup extents copy space */
+ lb_size = le32toh(ump->logical_vol->lb_size);
+ ump->la_node_ad_cpy = malloc(lb_size * UDF_MAX_ALLOC_EXTENTS,
+ M_UDFTEMP, M_WAITOK);
+ memset(ump->la_node_ad_cpy, 0, lb_size * UDF_MAX_ALLOC_EXTENTS);
+#endif
+
+ /* setup rest of mount information */
+ mp->mnt_data = ump;
+
+#if 0
+ /* bshift is always equal to disc sector size */
+ mp->mnt_dev_bshift = bshift;
+ mp->mnt_fs_bshift = bshift;
+#endif
+
+ /* note that the mp info needs to be initialised for reading! */
+ /* read vds support tables like VAT, sparable etc. */
+ if ((error = udf_read_vds_tables(ump))) {
+ printf( "UDF mount: error in format or damaged disc "
+ "(VDS tables failing)\n");
+ goto fail;
+ }
+
+ /* check if volume integrity is closed otherwise its dirty */
+ logvol_integrity = le32toh(ump->logvol_integrity->integrity_type);
+ if (logvol_integrity != UDF_INTEGRITY_CLOSED) {
+ printf("UDF mount: file system is not clean; ");
+ printf("please fsck(8)\n");
+ error = EPERM;
+ goto fail;
+ }
+
+ /* read root directory */
+ if ((error = udf_read_rootdirs(ump))) {
+ printf( "UDF mount: "
+ "disc not properly formatted or damaged disc "
+ "(rootdirs failing)\n");
+ goto fail;
+ }
+
+ /* do we have to set this? */
+ /* devvp->v_specmountpoint = mp; */
+
+ /* success! */
+ return (0);
+
+fail:
+ if (cp != NULL) {
+ DROP_GIANT();
+ g_topology_lock();
+ g_vfs_close(cp);
+ g_topology_unlock();
+ PICKUP_GIANT();
+ }
+ dev_rel(dev);
+ if (ump != NULL) {
+ /*udf_discstrat_finish(VFSTOUDF(mp)); */
+ free_udf_mountinfo(mp);
+ }
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_root(struct mount *mp, int flags, struct vnode **vpp)
+{
+ struct long_ad *dir_loc;
+ struct udf_mount *ump = VFSTOUDF(mp);
+ ino_t ino;
+ int error;
+
+ DPRINTF(CALL, ("udf_root called\n"));
+
+ dir_loc = &ump->fileset_desc->rootdir_icb;
+ ino = udf_get_node_id(dir_loc);
+ error = udf_vget(mp, ino, flags, vpp);
+ if (error)
+ return (error);
+
+ if (!((*vpp)->v_vflag & VV_ROOT)) {
+ printf("NOT A ROOT NODE?\n");
+ return (EDOOFUS);
+ }
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_statfs(struct mount *mp, struct statfs *sbp)
+{
+ struct udf_mount *ump = VFSTOUDF(mp);
+ struct logvol_int_desc *lvid;
+ struct udf_logvol_info *impl;
+ uint64_t sizeblks, freeblks, files = 0;
+ int num_part;
+
+ DPRINTF(CALL, ("udf_statvfs called\n"));
+ /* mutex_enter(&ump->allocate_mutex); */
+ udf_calc_freespace(ump, &sizeblks, &freeblks);
+
+ lvid = ump->logvol_integrity;
+ num_part = le32toh(lvid->num_part);
+ impl = (struct udf_logvol_info *) (lvid->tables + 2*num_part);
+ if (impl) {
+ files = le32toh(impl->num_files);
+ files += le32toh(impl->num_directories);
+ }
+/* mutex_exit(&ump->allocate_mutex); */
+
+ sbp->f_version = STATFS_VERSION; /* structure version number */
+ /*uint32_t f_type;*/ /* type of filesystem */
+ sbp->f_flags = mp->mnt_flag; /* copy of mount exported flags */
+ sbp->f_bsize = ump->discinfo.sector_size; /* filesystem fragment size */
+ sbp->f_iosize = ump->discinfo.sector_size; /* optimal transfer block size */
+ sbp->f_blocks = sizeblks; /* total data blocks in filesystem */
+ sbp->f_bfree = freeblks; /* free blocks in filesystem */
+ sbp->f_bavail = 0; /* free blocks avail to non-superuser */
+ sbp->f_files = files; /* total file nodes in filesystem */
+ sbp->f_ffree = 0; /* free nodes avail to non-superuser */
+ /*uint64_t f_syncwrites;*/ /* count of sync writes since mount */
+ /*uint64_t f_asyncwrites;*/ /* count of async writes since mount */
+ /*uint64_t f_syncreads;*/ /* count of sync reads since mount */
+ /*uint64_t f_asyncreads;*/ /* count of async reads since mount */
+ /*uint64_t f_spare[10];*/ /* unused spare */
+ /*uint32_t f_namemax;*/ /* maximum filename length */
+ /*uid_t f_owner;*/ /* user that mounted the filesystem */
+ /*fsid_t f_fsid;*/ /* filesystem id */
+ /*char f_charspare[80];*/ /* spare string space */
+ /*char f_fstypename[MFSNAMELEN];*/ /* filesystem type name */
+ /*char f_mntfromname[MNAMELEN];*/ /* mounted filesystem */
+ /*char f_mntonname[MNAMELEN];*/ /* directory on which mounted */
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * TODO what about writing out free space maps, lvid etc? only on `waitfor'
+ * i.e. explicit syncing by the user?
+ */
+#if 0
+static int
+udf_sync_writeout_system_files(struct udf_mount *ump, int clearflags)
+{
+ int error;
+
+ /* XXX lock for VAT en bitmaps? */
+ /* metadata nodes are written synchronous */
+ DPRINTF(CALL, ("udf_sync: syncing metadata\n"));
+ if (ump->lvclose & UDF_WRITE_VAT)
+ udf_writeout_vat(ump);
+
+ error = 0;
+ if (ump->lvclose & UDF_WRITE_PART_BITMAPS) {
+ /* writeout metadata spacetable if existing */
+ error = udf_write_metadata_partition_spacetable(ump, MNT_WAIT);
+ if (error)
+ printf( "udf_writeout_system_files : "
+ " writeout of metadata space bitmap failed\n");
+
+ /* writeout partition spacetables */
+ error = udf_write_physical_partition_spacetables(ump, MNT_WAIT);
+ if (error)
+ printf( "udf_writeout_system_files : "
+ "writeout of space tables failed\n");
+ if (!error && clearflags)
+ ump->lvclose &= ~UDF_WRITE_PART_BITMAPS;
+ }
+
+ return (error);
+}
+
+
+int
+udf_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
+{
+ struct udf_mount *ump = VFSTOUDF(mp);
+
+ DPRINTF(CALL, ("udf_sync called\n"));
+ /* if called when mounted readonly, just ignore */
+ if (mp->mnt_flag & MNT_RDONLY)
+ return (0);
+
+ if (ump->syncing && !waitfor) {
+ printf("UDF: skipping autosync\n");
+ return (0);
+ }
+ /* get sync lock */
+ ump->syncing = 1;
+
+ /* pre-sync */
+ udf_do_sync(ump, cred, waitfor);
+
+ if (waitfor == MNT_WAIT)
+ udf_sync_writeout_system_files(ump, true);
+
+ DPRINTF(CALL, ("end of udf_sync()\n"));
+ ump->syncing = 0;
+ return (0);
+}
+
+#endif
+/* This added only for temp use */
+struct udf_node *
+udf_alloc_node()
+{
+ return (uma_zalloc(udf_zone_node, M_WAITOK | M_ZERO));
+}
+
+void
+udf_free_node(struct udf_node *unode)
+{
+ uma_zfree(udf_zone_node, unode);
+}
+/* --------------------------------------------------------------------- */
+
+/*
+ * Get the vnode for the filesystem-specific file id ino.  It is used to
+ * reference files by unique ID and for NFSv3.
+ * (optional) TODO lookup why some sources state NFSv3
+ *
+ * This is done as in the current udf implementation. I really have no idea
+ * if it is correct.
+ */
+int
+udf_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
+{
+ struct vnode *nvp;
+ struct udf_node *unode;
+ struct udf_mount *ump;
+ int error, udf_file_type;
+
+ DPRINTF(NOTIMPL, ("udf_vget called\n"));
+ error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
+ if (error || *vpp != NULL)
+ return (error);
+
+ /*
+ * We must promote to an exclusive lock for vnode creation. This
+ * can happen if lookup is passed LOCKSHARED.
+ */
+ if ((flags & LK_TYPE_MASK) == LK_SHARED) {
+ flags &= ~LK_TYPE_MASK;
+ flags |= LK_EXCLUSIVE;
+ }
+
+ ump = VFSTOUDF(mp);
+ error = udf_getanode(mp, &nvp);
+ if (error)
+ return (error);
+
+ lockmgr(nvp->v_vnlock, LK_EXCLUSIVE, NULL);
+ if ((error = insmntque(nvp, mp)) != 0)
+ return (error);
+
+ /* TODO: Does this leak unode or vnodes? */
+ error = vfs_hash_insert(nvp, ino, flags, curthread, vpp, NULL, NULL);
+ if (error || *vpp != NULL)
+ return (error);
+
+
+ /*
+ * Load read and set up the unode structure.
+ */
+ error = udf_get_node(ump, ino, &unode);
+	if (error) {
+		vgone(nvp);
+		vput(nvp);
+		return (error);
+	}
+ nvp->v_data = unode;
+ unode->vnode = nvp;
+
+ /* mark the root node as such */
+ if (ump->fileset_desc &&
+ ino == udf_get_node_id(&ump->fileset_desc->rootdir_icb))
+ nvp->v_vflag |= VV_ROOT;
+
+ /*
+ * Translate UDF filetypes into vnode types.
+ *
+ * Systemfiles like the meta main and mirror files are not treated as
+ * normal files, so we type them as having no type. UDF dictates that
+ * they are not allowed to be visible.
+ */
+ if (unode->fe)
+ udf_file_type = unode->fe->icbtag.file_type;
+ else
+ udf_file_type = unode->efe->icbtag.file_type;
+
+ switch (udf_file_type) {
+ case UDF_ICB_FILETYPE_DIRECTORY :
+ case UDF_ICB_FILETYPE_STREAMDIR :
+ nvp->v_type = VDIR;
+ break;
+ case UDF_ICB_FILETYPE_BLOCKDEVICE :
+ nvp->v_type = VBLK;
+ break;
+ case UDF_ICB_FILETYPE_CHARDEVICE :
+ nvp->v_type = VCHR;
+ break;
+ case UDF_ICB_FILETYPE_SOCKET :
+ nvp->v_type = VSOCK;
+ break;
+ case UDF_ICB_FILETYPE_FIFO :
+ nvp->v_type = VFIFO;
+ break;
+ case UDF_ICB_FILETYPE_SYMLINK :
+ nvp->v_type = VLNK;
+ break;
+ case UDF_ICB_FILETYPE_VAT :
+ case UDF_ICB_FILETYPE_META_MAIN :
+ case UDF_ICB_FILETYPE_META_MIRROR :
+ nvp->v_type = VNON;
+ break;
+ case UDF_ICB_FILETYPE_RANDOMACCESS :
+ case UDF_ICB_FILETYPE_REALTIME :
+ nvp->v_type = VREG;
+ break;
+ default:
+ nvp->v_type = VBAD;
+ }
+
+ /* TODO specfs, fifofs etc etc. vnops setting */
+
+ /* don't forget to set vnode's v_size */
+/* uvm_vnp_setsize(nvp, file_size); */
+
+ if (nvp->v_type != VFIFO)
+ VN_LOCK_ASHARE(nvp);
+
+ *vpp = nvp;
+
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Lookup vnode for file handle specified
+ */
+int
+udf_fhtovp(struct mount *mp, struct fid *fhp, int flags,
+ struct vnode **vpp)
+{
+ struct vnode *nvp;
+ struct udf_fid *ufid = (struct udf_fid*)fhp;
+ struct udf_node *udf_node;
+ uint64_t filelen;
+ int error;
+
+ if ((error = VFS_VGET(mp, ufid->ino, LK_EXCLUSIVE, &nvp)) != 0) {
+ *vpp = NULLVP;
+ return (error);
+ }
+
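+	/*
+	 * The file handle only carries the node id; VFS_VGET() above
+	 * reconstituted the vnode, so size its VM object to the file length.
+	 */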
+ udf_node = VTOI(nvp);
+ if (udf_node->efe)
+ filelen = le64toh(udf_node->efe->inf_len);
+ else
+ filelen = le64toh(udf_node->fe->inf_len);
+
+ *vpp = nvp;
+ vnode_create_vobject(*vpp, filelen, curthread);
+
+ return (0);
+}
+
Index: sys/fs/udf2/udf_vnops.c
===================================================================
--- /dev/null
+++ sys/fs/udf2/udf_vnops.c
@@ -0,0 +1,2489 @@
+/*-
+ * Copyright (c) 2012 Oleksandr Dudinskyi
+ * Copyright (c) 2012 Will DeVries
+ * Copyright (c) 2006, 2008 Reinoud Zandijk
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Generic parts are derived from software contributed to The NetBSD Foundation
+ * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
+ * 2005 program.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/cdefs.h>
+#include <sys/endian.h>
+#include <sys/systm.h>
+#include <sys/namei.h>
+#include <sys/buf.h>
+#include <sys/mount.h>
+#include <sys/vnode.h>
+#include <sys/malloc.h>
+#include <sys/dirent.h>
+#include <sys/unistd.h>
+#include <sys/bio.h>
+
+#include "ecma167-udf.h"
+#include "udf.h"
+#include "udf_subr.h"
+
+
+static vop_access_t udf_access;
+static vop_bmap_t udf_bmap;
+static vop_cachedlookup_t udf_cachedlookup;
+static vop_getattr_t udf_getattr;
+static vop_ioctl_t udf_ioctl;
+static vop_open_t udf_open;
+static vop_pathconf_t udf_pathconf;
+static vop_print_t udf_print;
+static vop_read_t udf_read;
+static vop_readdir_t udf_readdir;
+static vop_readlink_t udf_readlink;
+static vop_reclaim_t udf_reclaim;
+static vop_setattr_t udf_setattr;
+static vop_strategy_t udf_strategy;
+static vop_vptofh_t udf_vptofh;
+
+static struct vop_vector udf_vnodeops = {
+ .vop_default = &default_vnodeops,
+ .vop_access = udf_access,
+ .vop_getattr = udf_getattr,
+ .vop_open = udf_open,
+ .vop_ioctl = udf_ioctl,
+ .vop_pathconf = udf_pathconf,
+ .vop_print = udf_print,
+ .vop_read = udf_read,
+ .vop_readdir = udf_readdir,
+ .vop_readlink = udf_readlink,
+ .vop_setattr = udf_setattr,
+ .vop_strategy = udf_strategy,
+ .vop_bmap = udf_bmap,
+ .vop_cachedlookup = udf_cachedlookup,
+ .vop_reclaim = udf_reclaim,
+ .vop_vptofh = udf_vptofh,
+ .vop_lookup = vfs_cache_lookup,
+};
+
+struct vop_vector udf_fifoops = {
+ .vop_access = udf_access,
+ .vop_getattr = udf_getattr,
+ .vop_print = udf_print,
+ .vop_setattr = udf_setattr,
+ .vop_reclaim = udf_reclaim,
+ .vop_vptofh = udf_vptofh,
+ .vop_default = &fifo_specops,
+};
+
+/* implementations of vnode functions; table follows at end */
+/* --------------------------------------------------------------------- */
+
+int
+udf_getanode(struct mount *mp, struct vnode **vpp)
+{
+ DPRINTF(CALL, ("udf_getanode called\n"));
+
+ return (getnewvnode("udf2", mp, &udf_vnodeops, vpp));
+}
+
+
+#if 0
+int
+udf_inactive(void *v)
+{
+ struct vop_inactive_args /* {
+ struct vnode *a_vp;
+ bool *a_recycle;
+ } */ *ap = v;
+ struct vnode *vp = ap->a_vp;
+ struct udf_node *udf_node = VTOI(vp);
+ int refcnt;
+
+ DPRINTF(NODE, ("udf_inactive called for udf_node %p\n", VTOI(vp)));
+
+ if (udf_node == NULL) {
+ DPRINTF(NODE, ("udf_inactive: inactive NULL UDF node\n"));
+ VOP_UNLOCK(vp);
+ return (0);
+ }
+
+ /*
+ * Optionally flush metadata to disc. If the file has not been
+ * referenced anymore in a directory we ought to free up the resources
+ * on disc if applicable.
+ */
+ if (udf_node->fe) {
+ refcnt = le16toh(udf_node->fe->link_cnt);
+ } else {
+ assert(udf_node->efe);
+ refcnt = le16toh(udf_node->efe->link_cnt);
+ }
+
+ if ((refcnt == 0) && (vp->v_vflag & VV_SYSTEM)) {
+ DPRINTF(VOLUMES, ("UDF_INACTIVE deleting VV_SYSTEM\n"));
+		/* system nodes are not written out on inactive, so flush */
+ udf_node->i_flags = 0;
+ }
+
+ *ap->a_recycle = false;
+ if ((refcnt == 0) && ((vp->v_vflag & VV_SYSTEM) == 0)) {
+ /* remove this file's allocation */
+ DPRINTF(NODE, ("udf_inactive deleting unlinked file\n"));
+ *ap->a_recycle = true;
+ udf_delete_node(udf_node);
+ VOP_UNLOCK(vp);
+ vrecycle(vp, NULL, curlwp);
+ return (0);
+ }
+
+ /* write out its node */
+ if (udf_node->i_flags & (IN_CHANGE | IN_UPDATE | IN_MODIFIED))
+ udf_update(vp, NULL, NULL, NULL, 0);
+ VOP_UNLOCK(vp);
+
+ return (0);
+}
+#endif
+/* --------------------------------------------------------------------- */
+
+
+static int
+udf_reclaim(struct vop_reclaim_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct udf_node *udf_node = VTOI(vp);
+
+ DPRINTF(NODE, ("udf_reclaim called for node %p\n", udf_node));
+ /*
+ * Destroy the vm object and flush associated pages.
+ */
+ vnode_destroy_vobject(vp);
+
+#if 0
+ /* update note for closure */
+ udf_update(vp, NULL, NULL, NULL, UPDATE_CLOSE);
+
+ /* async check to see if all node descriptors are written out */
+ while ((volatile int) udf_node->outstanding_nodedscr > 0) {
+ vprint("udf_reclaim(): waiting for writeout\n", vp);
+ tsleep(&udf_node->outstanding_nodedscr, PRIBIO, "recl wait", hz/8);
+ }
+
+ /* purge old data from namei */
+ cache_purge(vp);
+#endif
+ /* dispose all node knowledge */
+ if (udf_node == NULL) {
+ DPRINTF(NODE, ("udf_reclaim(): null udfnode\n"));
+ } else {
+ vfs_hash_remove(vp);
+ /* dispose all node knowledge */
+ udf_dispose_node(udf_node);
+ vp->v_data = NULL;
+ }
+
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+static int
+udf_read(struct vop_read_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct uio *uio = ap->a_uio;
+ struct buf *bp;
+ struct udf_node *udf_node = VTOI(vp);
+ uint64_t file_size;
+ int on, n, lbn;
+ int error = 0;
+
+ DPRINTF(READ, ("udf_read called\n"));
+
+ /* can this happen? some filingsystems have this check */
+ if (uio->uio_offset < 0)
+ return (EINVAL);
+ if (uio->uio_resid == 0)
+ return (0);
+
+#ifdef INVARIANTS
+ /* As in ffs_read() */
+ if (vp->v_type != VDIR && vp->v_type != VREG)
+ panic("udf_read: type %d", vp->v_type);
+#endif
+
+ KASSERT(udf_node, ("udf_read: udf_node is null"));
+ KASSERT(udf_node->fe || udf_node->efe, ("udf_read: Extended File Entry or File Entry is null"));
+
+ /* get file/directory filesize */
+	if (udf_node->fe) {
+		file_size = le64toh(udf_node->fe->inf_len);
+	} else {
+		KASSERT(udf_node->efe, ("Extended File Entry is null"));
+		file_size = le64toh(udf_node->efe->inf_len);
+	}
+
+ /* read contents using buffercache */
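+	/*
+	 * One logical sector per iteration: split the offset into a block
+	 * number and an offset within that block, bread() the block through
+	 * the vnode (udf_bmap/udf_strategy do the address translation) and
+	 * uiomove() the valid bytes to the caller.
+	 */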
+ do {
+ /* reached end? */
+ if (file_size <= uio->uio_offset)
+ break;
+
+ /* maximise length to file extremity */
+ n = min(file_size - uio->uio_offset, uio->uio_resid);
+
+ lbn = uio->uio_offset / udf_node->ump->discinfo.sector_size;
+ on = uio->uio_offset % udf_node->ump->discinfo.sector_size;
+ n = min(udf_node->ump->discinfo.sector_size - on, uio->uio_resid);
+ n = min(n, file_size - uio->uio_offset);
+ error = bread(vp, lbn, udf_node->ump->discinfo.sector_size, NOCRED, &bp);
+ n = min(n, udf_node->ump->discinfo.sector_size - bp->b_resid);
+
+ if (!error)
+ error = uiomove(bp->b_data + on, n, uio);
+
+ brelse(bp);
+ } while (error == 0 && uio->uio_resid > 0 && n != 0);
+#if 0
+ /* note access time unless not requested */
+ if (!(vp->v_mount->mnt_flag & MNT_NOATIME)) {
+ udf_node->i_flags |= IN_ACCESS;
+ if ((ioflag & IO_SYNC) == IO_SYNC)
+ error = udf_update(vp, NULL, NULL, NULL, UPDATE_WAIT);
+ }
+#endif
+
+ return (error);
+}
+/* --------------------------------------------------------------------- */
+#ifdef WRITE_SUPPORT
+int
+udf_write(struct vop_write_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct uio *uio = ap->a_uio;
+ int ioflag = ap->a_ioflag;
+ kauth_cred_t cred = ap->a_cred;
+ int advice = IO_ADV_DECODE(ap->a_ioflag);
+ struct uvm_object *uobj;
+ struct udf_node *udf_node = VTOI(vp);
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ uint64_t file_size, old_size, old_offset;
+ vsize_t len;
+ int async = vp->v_mount->mnt_flag & MNT_ASYNC;
+ int aflag = ioflag & IO_SYNC ? B_SYNC : 0;
+ int error;
+ int resid, extended;
+
+ /*
+ * XXX writing to extended attributes not yet implemented. FreeBSD has
+ * it in mind to forward the IO_EXT read call to the
+ * VOP_READEXTATTR().
+ */
+
+ DPRINTF(WRITE, ("udf_write called\n"));
+
+ /* can this happen? some filingsystems have this check */
+ if (uio->uio_offset < 0)
+ return (EINVAL);
+ if (uio->uio_resid == 0)
+ return (0);
+
+ /* protect against rogue programs writing raw directories or links */
+ if ((ioflag & IO_ALTSEMANTICS) == 0) {
+ if (vp->v_type == VDIR)
+ return (EISDIR);
+ /* all but regular files just give EINVAL for now */
+ if (vp->v_type != VREG)
+ return (EINVAL);
+ }
+
+ assert(udf_node);
+ assert(udf_node->fe || udf_node->efe);
+
+ /* get file/directory filesize */
+ if (udf_node->fe) {
+ fe = udf_node->fe;
+ file_size = le64toh(fe->inf_len);
+ } else {
+ assert(udf_node->efe);
+ efe = udf_node->efe;
+ file_size = le64toh(efe->inf_len);
+ }
+ old_size = file_size;
+
+ /* if explicitly asked to append, uio_offset can be wrong? */
+ if (ioflag & IO_APPEND)
+ uio->uio_offset = file_size;
+
+ extended = (uio->uio_offset + uio->uio_resid > file_size);
+ if (extended) {
+ DPRINTF(WRITE, ("extending file from %"PRIu64" to %"PRIu64"\n",
+ file_size, uio->uio_offset + uio->uio_resid));
+ error = udf_grow_node(udf_node, uio->uio_offset + uio->uio_resid);
+ if (error)
+ return (error);
+ file_size = uio->uio_offset + uio->uio_resid;
+ }
+
+ /* write contents using buffercache */
+ uobj = &vp->v_uobj;
+ resid = uio->uio_resid;
+ error = 0;
+
+ uvm_vnp_setwritesize(vp, file_size);
+ old_offset = uio->uio_offset;
+ while (uio->uio_resid > 0) {
+ /* maximise length to file extremity */
+ len = MIN(file_size - uio->uio_offset, uio->uio_resid);
+ if (len == 0)
+ break;
+
+ genfs_node_wrlock(vp);
+ error = GOP_ALLOC(vp, uio->uio_offset, len, aflag, cred);
+ genfs_node_unlock(vp);
+ if (error)
+ break;
+
+ /* ubc, here we come, prepare to trap */
+ error = ubc_uiomove(uobj, uio, len, advice,
+ UBC_WRITE | UBC_UNMAP_FLAG(vp));
+ if (error)
+ break;
+
+ /*
+ * flush what we just wrote if necessary.
+ * XXXUBC simplistic async flushing.
+ *
+ * Directories are excluded since its file data that we want
+ * to purge.
+ */
+ if (!async && (vp->v_type != VDIR) &&
+ (old_offset >> 16 != uio->uio_offset >> 16)) {
+ mutex_enter(&vp->v_interlock);
+ error = VOP_PUTPAGES(vp, (old_offset >> 16) << 16,
+ (uio->uio_offset >> 16) << 16, PGO_CLEANIT);
+ old_offset = uio->uio_offset;
+ }
+ }
+ uvm_vnp_setsize(vp, file_size);
+
+ /* mark node changed and request update */
+ udf_node->i_flags |= IN_CHANGE | IN_UPDATE;
+
+ /*
+ * XXX TODO FFS has code here to reset setuid & setgid when we're not
+ * the superuser as a precaution against tampering.
+ */
+
+ /* if we wrote a thing, note write action on vnode */
+ if (resid > uio->uio_resid)
+ VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
+
+ if (error) {
+ /* bring back file size to its former size */
+ /* take notice of its errors? */
+ (void) udf_chsize(vp, (u_quad_t) old_size, cred);
+
+ /* roll back uio */
+ uio->uio_offset -= resid - uio->uio_resid;
+ uio->uio_resid = resid;
+ } else {
+ /* if we write and we're synchronous, update node */
+ if ((resid > uio->uio_resid) && ((ioflag & IO_SYNC) == IO_SYNC))
+ error = udf_update(vp, NULL, NULL, NULL, UPDATE_WAIT);
+ }
+
+ return (error);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+static int
+udf_bmap(struct vop_bmap_args /* {
+ struct vop_generic_args a_gen;
+ struct vnode *a_vp;
+ daddr_t a_bn;
+ struct bufobj **a_bop;
+ daddr_t *a_bnp;
+ int *a_runp;
+ int *a_runb;
+ } */ *ap)
+{
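+	/*
+	 * Instantiate the backing VM object now that the file size is known,
+	 * so mmap(2) and the buffer cache can use it.
+	 */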
+ struct vnode *vp = ap->a_vp; /* our node */
+ struct udf_node *udf_node = VTOI(vp);
+ uint64_t lsector;
+ uint32_t maxblks;
+ int error;
+
+ if (ap->a_bop != NULL)
+ *ap->a_bop = &udf_node->ump->devvp->v_bufobj;
+
+ if (ap->a_bnp == NULL)
+ return (0);
+
+ if (ap->a_runb)
+ *ap->a_runb = 0;
+
+ /* get logical block and run */
+ error = udf_bmap_translate(udf_node, ap->a_bn, &lsector, &maxblks);
+ if (error)
+ return (error);
+
+	/* Translate logical to physical sector number */
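+	/*
+	 * UDF_TRANS_INTERN means the data is embedded in the (extended) file
+	 * entry itself, so there is no device block to map; UDF_TRANS_ZERO
+	 * marks an unallocated (sparse) extent and is signalled with -1 so
+	 * the caller zero-fills the buffer.
+	 */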
+ if (lsector == UDF_TRANS_INTERN)
+ return (EOPNOTSUPP);
+ else if (lsector == UDF_TRANS_ZERO)
+ *ap->a_bnp = -1; /* zero the buffer */
+ else
+ *ap->a_bnp = lsector * (udf_node->ump->discinfo.sector_size/DEV_BSIZE);
+
+ /*
+ * Determine maximum number of readahead blocks following the
+ * requested block.
+ */
+ if (ap->a_runp) {
+ if (maxblks <= 1)
+ *ap->a_runp = 0;
+ else if (maxblks - 1 >= MAXBSIZE)
+ *ap->a_runp = MAXBSIZE - 1;
+ else
+ *ap->a_runp = maxblks - 1;
+
+ }
+ if (ap->a_runb)
+ *ap->a_runb = 0;
+
+ /* return success */
+ return (0);
+}
+
+static int
+udf_strategy(struct vop_strategy_args /* {
+ struct vnode *a_vp;
+ struct buf *a_bp;
+ } */ *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct buf *bp = ap->a_bp;
+ struct udf_node *udf_node = VTOI(vp);
+ struct bufobj *bo = udf_node->ump->bo;
+ uint64_t lsector;
+ int error;
+ uint32_t lb_size, from, sectors;
+ uint32_t maxblks;
+
+ DPRINTF(STRATEGY, ("udf_strategy called\n"));
+
+ /* check if we ought to be here */
+ if (vp->v_type == VBLK || vp->v_type == VCHR)
+ panic("udf_strategy: spec");
+
+ /* only filebuffers ought to be read/write by this, no descriptors */
+ KASSERT(bp->b_blkno >= 0, ("udf_strategy: nonexistent physical block number"));
+
+ /* get sector size */
+ lb_size = udf_node->ump->discinfo.sector_size;
+
+ /* calculate sector to start from */
+ from = bp->b_blkno;
+
+ /* calculate length to fetch/store in sectors */
+ sectors = bp->b_bcount / lb_size;
+ KASSERT(bp->b_bcount > 0, ("udf_strategy: no valid bytes in buffer"));
+
+ /* NEVER assume later that this buffer is already translated */
+ /* bp->b_lblkno = bp->b_blkno; */
+
+ /* check assertions: we OUGHT to always get multiples of this */
+ KASSERT(sectors * lb_size == bp->b_bcount, ("udf_strategy: wrong length"
+	    " to fetch/store in sectors"));
+ if (bp->b_blkno == bp->b_lblkno) {
+ /* get logical block and run */
+ error = udf_bmap_translate(udf_node, bp->b_lblkno, &lsector, &maxblks);
+ if (error) {
+ bp->b_error = error;
+ bufdone(bp);
+ return (error);
+ }
+ }
+ if (bp->b_iocmd & BIO_READ) {
+ DPRINTF(STRATEGY, ("\tread vp %p buf %p (blk no %ju)"
+ ", sector %d for %d sectors\n",
+ vp, bp, (uintmax_t)bp->b_blkno, from, sectors));
+
+		if (lsector == UDF_TRANS_ZERO) {
+			/* unallocated extent: return a zero-filled sector */
+			memset(bp->b_data, 0, lb_size);
+			bufdone(bp);
+			if ((bp->b_flags & B_ASYNC) == 0)
+				bufwait(bp);
+		} else if (lsector == UDF_TRANS_INTERN) {
+			/* data is embedded in the file entry itself */
+ error = udf_read_internal(udf_node, (uint8_t *) bp->b_data);
+ if (error)
+ bp->b_error = error;
+ bufdone(bp);
+ if ((bp->b_flags & B_ASYNC) == 0)
+ bufwait(bp);
+ } else {
+ /* bmap gives sector numbers. bio works with device blocks */
+ bp->b_blkno = lsector * (udf_node->ump->discinfo.sector_size/DEV_BSIZE);
+ bp->b_iooffset = dbtob(bp->b_blkno);
+ BO_STRATEGY(bo, bp);
+ }
+ } else {
+ DPRINTF(STRATEGY, ("\twrite vp %p buf %p (blk no %ju)"
+ ", sector %d for %d sectors\n",
+ vp, bp, (uintmax_t)bp->b_blkno, from, sectors));
+
+ return (ENOTSUP);
+ }
+ return (bp->b_error);
+}
+
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_readdir(struct vop_readdir_args /* {
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+ int *a_eofflag;
+ int *a_ncookies;
+ u_long **a_cookies;
+ } */ *ap)
+{
+ struct uio *uio;
+ struct vnode *vp;
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ struct fileid_desc *fid;
+ struct dirent *dirent;
+ struct udf_mount *ump;
+ struct udf_node *udf_node;
+ uint64_t *cookiesp, *cookies, cookie;
+ uint64_t file_size, diroffset, transoffset;
+ int ncookies, acookies;
+ int error;
+ uint32_t lb_size;
+ char *fid_name;
+
+ uio = ap->a_uio;
+ vp = ap->a_vp;
+ udf_node = VTOI(vp);
+ ump = udf_node->ump;
+
+ DPRINTF(READDIR, ("udf_readdir called\n"));
+
+ /* This operation only makes sense on directory nodes. */
+ if (vp->v_type != VDIR)
+ return (ENOTDIR);
+
+ /* get directory filesize */
+ if (udf_node->fe) {
+ fe = udf_node->fe;
+ file_size = le64toh(fe->inf_len);
+ } else {
+ efe = udf_node->efe;
+ file_size = le64toh(efe->inf_len);
+ }
+
+ dirent = malloc(sizeof(struct dirent), M_UDFTEMP, M_WAITOK | M_ZERO);
+
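+	/*
+	 * Cookies are fid stream offsets handed back through
+	 * getdirentries(2)/NFS so that a partial read can be resumed from
+	 * uio_offset; the synthesized `.' entry is not part of the stream
+	 * and is marked with the UDF_DIRCOOKIE_DOT offset instead.
+	 */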
+ if (ap->a_ncookies != NULL) {
+ /*
+ * Guess how many entries are needed. If we run out, this
+ * function will be called again and thing will pick up
+		 * function will be called again and things will pick up
+		 * where they left off.
+ ncookies = uio->uio_resid / 8;
+ cookies = malloc(sizeof(u_long) * ncookies, M_TEMP,
+ M_WAITOK | M_ZERO);
+		if (cookies == NULL) {
+			free(dirent, M_UDFTEMP);
+			return (ENOMEM);
+		}
+ cookiesp = cookies;
+ } else {
+ ncookies = 0;
+ cookies = NULL;
+ cookiesp = NULL;
+ }
+ acookies = 0;
+
+ /*
+	 * Add a `.' pseudo entry if at offset zero, since it is not in the
+	 * fid stream.
+ */
+ if (uio->uio_offset == 0) {
+ DPRINTF(READDIR, ("\t'.' inserted\n"));
+ memset(dirent, 0, sizeof(struct dirent));
+ dirent->d_fileno = udf_get_node_id(&udf_node->loc);
+ dirent->d_type = DT_DIR;
+ dirent->d_name[0] = '.';
+ dirent->d_name[1] = '\0';
+ dirent->d_namlen = 1;
+ dirent->d_reclen = GENERIC_DIRSIZ(dirent);
+ if (cookiesp) {
+ acookies++;
+ *cookiesp++ = 1;
+ }
+ error = uiomove(dirent, GENERIC_DIRSIZ(dirent), uio);
+ if (error)
+ goto bail;
+
+ /* mark with magic value that we have done the dummy */
+ uio->uio_offset = UDF_DIRCOOKIE_DOT;
+ }
+
+ /* we are called just as long as we keep on pushing data in */
+ error = 0;
+ if (uio->uio_offset < file_size) {
+ /* allocate temporary space for fid */
+ lb_size = le32toh(udf_node->ump->logical_vol->lb_size);
+ fid = malloc(lb_size, M_UDFTEMP, M_WAITOK);
+
+ if (uio->uio_offset == UDF_DIRCOOKIE_DOT)
+ uio->uio_offset = 0;
+
+ diroffset = uio->uio_offset;
+ transoffset = diroffset;
+ while (diroffset < file_size) {
+ DPRINTF(READDIR, ("\tread in fid stream\n"));
+ /* transfer a new fid/dirent */
+ error = udf_read_fid_stream(vp, &diroffset, fid);
+ DPRINTFIF(READDIR, error, ("read error in read fid "
+ "stream : %d\n", error));
+ if (error) {
+ printf("Read error in read fid: %d\n", error);
+ break;
+ }
+
+ /*
+ * create resulting dirent structure
+ */
+ memset(dirent, 0, sizeof(struct dirent));
+ dirent->d_fileno = udf_get_node_id(&fid->icb); /* inode hash XXX */
+
+ /* Not worth trying to go for the filetypes now, too expensive */
+ dirent->d_type = DT_UNKNOWN;
+ if (fid->file_char & UDF_FILE_CHAR_DIR)
+ dirent->d_type = DT_DIR;
+
+ /* '..' has no name, so provide one */
+ if (fid->file_char & UDF_FILE_CHAR_PAR) {
+ dirent->d_name[0] = '.';
+ dirent->d_name[1] = '.';
+ dirent->d_name[2] = '\0';
+ dirent->d_namlen = 2;
+ cookie = 2;
+ }
+ else {
+ fid_name = (char *) fid->data +
+ le16toh(fid->l_iu);
+ udf_to_unix_name(ump, dirent->d_name, MAXNAMLEN,
+ fid_name, fid->l_fi);
+ dirent->d_namlen = strlen(dirent->d_name);
+ cookie = transoffset;
+ }
+ dirent->d_reclen = GENERIC_DIRSIZ(dirent);
+
+ /*
+ * If there isn't enough space in the uio to return a
+ * whole dirent, break off read
+ */
+ if (uio->uio_resid < GENERIC_DIRSIZ(dirent))
+ break;
+
+ /* remember the last entry we transferred */
+ transoffset = diroffset;
+
+ /* skip deleted entries */
+ if (fid->file_char & UDF_FILE_CHAR_DEL)
+ continue;
+
+ /* skip not visible files */
+ if (fid->file_char & UDF_FILE_CHAR_VIS)
+ continue;
+
+ /* copy dirent to the caller */
+ DPRINTF(READDIR, ("\tread dirent `%s', type %d\n",
+ dirent->d_name, dirent->d_type));
+ if (cookiesp) {
+ if (acookies + 1 > ncookies)
+ break;
+ acookies++;
+ *cookiesp++ = cookie;
+ }
+ error = uiomove(dirent, GENERIC_DIRSIZ(dirent), uio);
+ if (error)
+ break;
+ }
+
+ /* pass on last transferred offset */
+ /* We lied for '.', so tell more lies. */
+ uio->uio_offset = transoffset;
+ free(fid, M_UDFTEMP);
+ }
+
+bail:
+ /* tell the calling layer whether we need to be called again */
+ if (ap->a_eofflag)
+ *ap->a_eofflag = (uio->uio_offset >= file_size);
+
+ if (error < 0)
+ error = 0;
+
+ if (ap->a_ncookies != NULL) {
+ if (error) {
+			free(cookies, M_TEMP);
+ } else {
+ *ap->a_ncookies = acookies;
+ *ap->a_cookies = (u_long *) cookies;
+ }
+ }
+
+#ifdef DEBUG
+ if (udf_verbose & UDF_DEBUG_READDIR) {
+ printf("returning offset %d\n", (uint32_t) uio->uio_offset);
+ if (ap->a_eofflag)
+ printf("returning EOF ? %d\n", *ap->a_eofflag);
+ if (error)
+ printf("readdir returning error %d\n", error);
+ }
+#endif
+
+ free(dirent, M_UDFTEMP);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_cachedlookup(struct vop_cachedlookup_args *ap)
+{
+ struct vnode *dvp = ap->a_dvp;
+ struct vnode **vpp = ap->a_vpp;
+ struct vnode *tdp = NULL;
+ struct componentname *cnp = ap->a_cnp;
+ struct fileid_desc *fid;
+ struct udf_node *dir_node;
+ struct udf_mount *ump;
+ uint64_t file_size, offset;
+ ino_t id = 0;
+ int nameiop, islastcn, mounted_ro, numpasses;
+ int unix_len, ltype;
+ int error = 0;
+ char *fid_name;
+ char *unix_name;
+
+ dir_node = VTOI(dvp);
+ ump = dir_node->ump;
+ *vpp = NULL;
+
+	DPRINTF(LOOKUP, ("udf_cachedlookup called\n"));
+
+ /* simplify/clarification flags */
+ nameiop = cnp->cn_nameiop;
+ islastcn = cnp->cn_flags & ISLASTCN;
+ mounted_ro = dvp->v_mount->mnt_flag & MNT_RDONLY;
+
+ /* check exec/dirread permissions first */
+ error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, curthread);
+ if (error)
+ return (error);
+
+ DPRINTF(LOOKUP, ("\taccess ok\n"));
+
+ /*
+ * If requesting a modify on the last path element on a read-only
+ * filingsystem, reject lookup.
+ */
+ if (islastcn && mounted_ro && (nameiop == DELETE || nameiop == RENAME))
+ return (EROFS);
+
+ DPRINTF(LOOKUP, ("\tlooking up cnp->cn_nameptr '%s'\n", cnp->cn_nameptr));
+
+ /* look in the namei cache; return 0 on success!! */
+#if 0
+ error = cache_lookup(dvp, vpp, cnp);
+ if (error >= 0)
+ return error;
+
+ DPRINTF(LOOKUP, ("\tNOT found in cache\n"));
+#endif
+ /* get directory filesize */
+ if (dir_node->fe)
+ file_size = le64toh(dir_node->fe->inf_len);
+ else
+ file_size = le64toh(dir_node->efe->inf_len);
+
+ /*
+ * If this is a LOOKUP and we've already partially searched through
+ * the directory, pick up where we left off and flag that the
+ * directory may need to be searched twice. For a full description,
+ * see /sys/fs/cd9660/cd9660_lookup.c:cd9660_lookup()
+ */
+ if (nameiop != LOOKUP || dir_node->diroff == 0 || dir_node->diroff > file_size) {
+ offset = 0;
+ numpasses = 1;
+ }
+ else {
+ offset = dir_node->diroff;
+ numpasses = 2;
+ nchstats.ncs_2passes++;
+ }
+
+ fid = malloc(ump->discinfo.sector_size, M_UDFTEMP, M_WAITOK);
+ unix_name = malloc(MAXNAMLEN, M_UDFTEMP, M_WAITOK);
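+	/*
+	 * Linear scan of the fid stream: skip deleted and hidden entries,
+	 * match `..' through the parent fid, otherwise translate each UDF
+	 * name to its unix form and compare it with the wanted component.
+	 */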
+lookuploop:
+ while (offset < file_size) {
+ error = udf_read_fid_stream(dvp, &offset, fid);
+ if (error) {
+ break;
+ }
+
+ /* skip deleted entries */
+ if (fid->file_char & UDF_FILE_CHAR_DEL)
+ continue;
+
+ /* skip not visible files */
+ if (fid->file_char & UDF_FILE_CHAR_VIS)
+ continue;
+
+ if (fid->file_char & UDF_FILE_CHAR_PAR) {
+ if (cnp->cn_flags & ISDOTDOT) {
+ id = udf_get_node_id(&fid->icb);
+ break;
+ }
+ }
+ else {
+ fid_name = (char *) fid->data + le16toh(fid->l_iu);
+ udf_to_unix_name(ump, unix_name, MAXNAMLEN, fid_name,
+ fid->l_fi);
+ unix_len = strlen(unix_name);
+
+ if (unix_len == cnp->cn_namelen) {
+ if (!strncmp(unix_name, cnp->cn_nameptr, cnp->cn_namelen)) {
+ id = udf_get_node_id(&fid->icb);
+ break;
+ }
+ }
+ }
+ }
+
+ if (error)
+ goto exit;
+
+ /* Did we have a match? */
+ if (id) {
+ /*
+ * Remember where this entry was if it's the final component.
+ */
+ if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == LOOKUP)
+ dir_node->diroff = offset;
+ if (numpasses == 2)
+ nchstats.ncs_pass2++;
+
+ if (cnp->cn_flags & ISDOTDOT) {
+			error = vn_vget_ino(dvp, id, cnp->cn_lkflags, &tdp);
+ }
+ else if (dir_node->hash_id == id) {
+ VREF(dvp); /* we want ourself, ie "." */
+ /*
+ * When we lookup "." we still can be asked to lock it
+ * differently.
+ */
+ ltype = cnp->cn_lkflags & LK_TYPE_MASK;
+ if (ltype != VOP_ISLOCKED(dvp)) {
+ if (ltype == LK_EXCLUSIVE)
+ vn_lock(dvp, LK_UPGRADE | LK_RETRY);
+ else /* if (ltype == LK_SHARED) */
+ vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
+ }
+ tdp = dvp;
+ }
+ else {
+ error = udf_vget(ump->vfs_mountp, id, cnp->cn_lkflags, &tdp);
+ }
+
+ if (!error) {
+ *vpp = tdp;
+ /* Put this entry in the cache */
+ if (cnp->cn_flags & MAKEENTRY)
+ cache_enter(dvp, *vpp, cnp);
+ }
+ }
+ else {
+ /* Name wasn't found on this pass. Do another pass? */
+ if (numpasses-- == 2) {
+ offset = 0;
+ goto lookuploop;
+ }
+
+		/* Enter name into cache as non-existent */
+ if (cnp->cn_flags & MAKEENTRY)
+ cache_enter(dvp, *vpp, cnp);
+
+ if ((cnp->cn_flags & ISLASTCN) &&
+ (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
+ error = EROFS;
+ else
+ error = ENOENT;
+ }
+
+exit:
+ free(fid, M_UDFTEMP);
+ free(unix_name, M_UDFTEMP);
+
+ DPRINTFIF(LOOKUP, error, ("udf_cachedlookup returning error %d\n", error));
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_getattr(struct vop_getattr_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct udf_node *udf_node = VTOI(vp);
+ struct file_entry *fe = udf_node->fe;
+ struct extfile_entry *efe = udf_node->efe;
+ struct filetimes_extattr_entry *ft_extattr;
+ struct device_extattr_entry *devattr;
+ struct vattr *vap = ap->a_vap;
+ struct timestamp *atime, *mtime, *attrtime, *creatime;
+ struct udf_mount *ump = udf_node->ump;
+ uint64_t filesize, blkssize;
+ uint32_t nlink, offset, a_l;
+ uint8_t *filedata;
+ uid_t uid;
+ gid_t gid;
+ int error;
+
+ DPRINTF(CALL, ("udf_getattr called\n"));
+
+	/* update times before we return values */
+#if 0
+ udf_itimes(udf_node, NULL, NULL, NULL);
+#endif
+
+ /* get descriptor information */
+ if (fe) {
+ nlink = le16toh(fe->link_cnt);
+ uid = (uid_t)le32toh(fe->uid);
+ gid = (gid_t)le32toh(fe->gid);
+ filesize = le64toh(fe->inf_len);
+ blkssize = le64toh(fe->logblks_rec);
+ atime = &fe->atime;
+ mtime = &fe->mtime;
+ attrtime = &fe->attrtime;
+ filedata = fe->data;
+
+ /* initial guess */
+ creatime = mtime;
+
+ /* check our extended attribute if present */
+ error = udf_extattr_search_intern(udf_node,
+ UDF_FILETIMES_ATTR_NO, "", &offset, &a_l);
+ if (!error) {
+ ft_extattr = (struct filetimes_extattr_entry *)
+ (filedata + offset);
+ if (ft_extattr->existence & UDF_FILETIMES_FILE_CREATION)
+ creatime = &ft_extattr->times[0];
+ }
+ } else {
+ KASSERT(udf_node->efe, ("Extended File Entry is null"));
+ nlink = le16toh(efe->link_cnt);
+ uid = (uid_t)le32toh(efe->uid);
+ gid = (gid_t)le32toh(efe->gid);
+ filesize = le64toh(efe->inf_len); /* XXX or obj_size? */
+ blkssize = le64toh(efe->logblks_rec);
+ atime = &efe->atime;
+ mtime = &efe->mtime;
+ attrtime = &efe->attrtime;
+ creatime = &efe->ctime;
+ filedata = efe->data;
+ }
+
+ /* do the uid/gid translation game */
+ if (uid == (uid_t) -1)
+ uid = ump->mount_args.anon_uid;
+ if (gid == (gid_t) -1)
+ gid = ump->mount_args.anon_gid;
+
+ /*
+ * BUG-ALERT: UDF doesn't count '.' as an entry, so we'll have to add
+	 * 1 to the link count if it is a directory whose attributes we are
+	 * asked for.
+ */
+ if (vp->v_type == VDIR) {
+ nlink++;
+
+ /* directories should be at least a single block? */
+ if (blkssize != 0)
+ filesize = blkssize * ump->discinfo.sector_size;
+ else
+ filesize = ump->discinfo.sector_size;
+ }
+
+ /* fill in struct vattr with values from the node */
+ vattr_null(vap);
+ vap->va_type = vp->v_type;
+ vap->va_mode = udf_getaccessmode(udf_node);
+ vap->va_nlink = nlink;
+ vap->va_uid = uid;
+ vap->va_gid = gid;
+ vap->va_fsid = dev2udev(ump->dev); /* vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0]; */
+ vap->va_fileid = udf_get_node_id(&udf_node->loc); /* inode hash XXX */
+ vap->va_size = filesize;
+ vap->va_blocksize = ump->discinfo.sector_size; /* wise? */
+
+ /* access times */
+ udf_timestamp_to_timespec(ump, atime, &vap->va_atime);
+ udf_timestamp_to_timespec(ump, mtime, &vap->va_mtime);
+ udf_timestamp_to_timespec(ump, attrtime, &vap->va_ctime);
+ udf_timestamp_to_timespec(ump, creatime, &vap->va_birthtime);
+
+	vap->va_gen = 1;	/* no multiple generations yet (!?) */
+ vap->va_flags = 0; /* no flags */
+ vap->va_bytes = blkssize * ump->discinfo.sector_size;
+ vap->va_filerev = 0; /* TODO file revision numbers?
+ This was changed from a 1. */
+ vap->va_vaflags = 0;
+ /* TODO get vaflags from the extended attributes? */
+
+ if ((vap->va_type == VBLK) || (vap->va_type == VCHR)) {
+ error = udf_extattr_search_intern(udf_node,
+ UDF_DEVICESPEC_ATTR_NO, "",
+ &offset, &a_l);
+ /* if error, deny access */
+ if (error || (filedata == NULL)) {
+ vap->va_mode = 0; /* or v_type = VNON? */
+ } else {
+			devattr = (struct device_extattr_entry *)
+			    (filedata + offset);
+ vap->va_rdev = makedev(
+ le32toh(devattr->major),
+ le32toh(devattr->minor)
+ );
+ /* TODO we could check the implementator */
+ }
+ }
+
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+#if 0
+static int
+udf_chown(struct vnode *vp, uid_t new_uid, gid_t new_gid,
+ kauth_cred_t cred)
+{
+ struct udf_node *udf_node = VTOI(vp);
+ uid_t uid;
+ gid_t gid;
+ int error;
+
+#ifdef notyet
+ /* TODO get vaflags from the extended attributes? */
+ /* Immutable or append-only files cannot be modified, either. */
+ if (udf_node->flags & (IMMUTABLE | APPEND))
+ return (EPERM);
+#endif
+
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return (EROFS);
+
+ /* retrieve old values */
+ udf_getownership(udf_node, &uid, &gid);
+
+ /* only one could be specified */
+ if (new_uid == VNOVAL)
+ new_uid = uid;
+ if (new_gid == VNOVAL)
+ new_gid = gid;
+
+ /* check if we can fit it in an 32 bits */
+ if ((uid_t) ((uint32_t) new_uid) != new_uid)
+ return (EINVAL);
+ if ((gid_t) ((uint32_t) new_gid) != new_gid)
+ return (EINVAL);
+
+ /* check permissions */
+ error = genfs_can_chown(vp, cred, uid, gid, new_uid, new_gid);
+ if (error)
+ return (error);
+
+ /* change the ownership */
+ udf_setownership(udf_node, new_uid, new_gid);
+
+ /* mark node changed */
+ udf_node->i_flags |= IN_CHANGE;
+
+ return (0);
+}
+
+
+static int
+udf_chmod(struct vnode *vp, mode_t mode, kauth_cred_t cred)
+{
+ struct udf_node *udf_node = VTOI(vp);
+ uid_t uid;
+ gid_t gid;
+ int error;
+
+#ifdef notyet
+ /* TODO get vaflags from the extended attributes? */
+ /* Immutable or append-only files cannot be modified, either. */
+ if (udf_node->flags & (IMMUTABLE | APPEND))
+ return (EPERM);
+#endif
+
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return (EROFS);
+
+ /* retrieve uid/gid values */
+ udf_getownership(udf_node, &uid, &gid);
+
+ /* check permissions */
+ error = genfs_can_chmod(vp, cred, uid, gid, mode);
+ if (error)
+ return (error);
+
+ /* change mode */
+ udf_setaccessmode(udf_node, mode);
+
+ /* mark node changed */
+ udf_node->i_flags |= IN_CHANGE;
+
+ return (0);
+}
+
+
+/* exported */
+int
+udf_chsize(struct vnode *vp, u_quad_t newsize, kauth_cred_t cred)
+{
+ struct udf_node *udf_node = VTOI(vp);
+ int error, extended;
+
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return (EROFS);
+
+ /* Decide whether this is a valid operation based on the file type. */
+ switch (vp->v_type) {
+ case VDIR:
+ return (EISDIR);
+ case VREG:
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return (EROFS);
+ break;
+ case VBLK:
+ /* FALLTHROUGH */
+ case VCHR:
+ /* FALLTHROUGH */
+ case VFIFO:
+ /* Allow modifications of special files even if in the file
+ * system is mounted read-only (we are not modifying the
+ * files themselves, but the objects they represent). */
+ return (0);
+ default:
+ /* Anything else is unsupported. */
+ return (EOPNOTSUPP);
+ }
+
+#if notyet
+ /* TODO get vaflags from the extended attributes? */
+ /* Immutable or append-only files cannot be modified, either. */
+ if (node->flags & (IMMUTABLE | APPEND))
+ return (EPERM);
+#endif
+
+ /* resize file to the requested size */
+ error = udf_resize_node(udf_node, newsize, &extended);
+
+ if (error == 0) {
+ /* mark change */
+ udf_node->i_flags |= IN_CHANGE | IN_MODIFY;
+ VN_KNOTE(vp, NOTE_ATTRIB | (extended ? NOTE_EXTEND : 0));
+ udf_update(vp, NULL, NULL, NULL, 0);
+ }
+
+ return (error);
+}
+
+
+static int
+udf_chflags(struct vnode *vp, mode_t mode, kauth_cred_t cred)
+{
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return (EROFS);
+
+	/* XXX we can't do this yet, but erroring out is annoying XXX */
+
+ return (0);
+}
+
+
+static int
+udf_chtimes(struct vnode *vp,
+ struct timespec *atime, struct timespec *mtime,
+ struct timespec *birthtime, int setattrflags,
+ kauth_cred_t cred)
+{
+ struct udf_node *udf_node = VTOI(vp);
+ uid_t uid;
+ gid_t gid;
+ int error;
+
+#ifdef notyet
+ /* TODO get vaflags from the extended attributes? */
+ /* Immutable or append-only files cannot be modified, either. */
+ if (udf_node->flags & (IMMUTABLE | APPEND))
+ return (EPERM);
+#endif
+
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return (EROFS);
+
+ /* retrieve uid/gid values */
+ udf_getownership(udf_node, &uid, &gid);
+
+ /* check permissions */
+ error = genfs_can_chtimes(vp, setattrflags, uid, cred);
+ if (error)
+ return (error);
+
+ /* update node flags depending on what times are passed */
+ if (atime->tv_sec != VNOVAL)
+ if (!(vp->v_mount->mnt_flag & MNT_NOATIME))
+ udf_node->i_flags |= IN_ACCESS;
+ if ((mtime->tv_sec != VNOVAL) || (birthtime->tv_sec != VNOVAL))
+ udf_node->i_flags |= IN_CHANGE | IN_UPDATE;
+
+ return (udf_update(vp, atime, mtime, birthtime, 0));
+}
+#endif
+
+static int
+udf_setattr(struct vop_setattr_args *ap)
+{
+/* struct vnode *vp = ap->a_vp; */
+/* struct udf_node *udf_node = VTOI(vp); */
+/* struct udf_mount *ump = udf_node->ump; */
+/* kauth_cred_t cred = ap->a_cred; */
+ struct vattr *vap = ap->a_vap;
+ int error = 0;
+
+ DPRINTF(CALL, ("udf_setattr called\n"));
+
+ /* Abort if any unsettable attribute is given. */
+ if (vap->va_type != VNON ||
+ vap->va_nlink != VNOVAL ||
+ vap->va_fsid != VNOVAL ||
+ vap->va_fileid != VNOVAL ||
+ vap->va_blocksize != VNOVAL ||
+#ifdef notyet
+ /* checks are debated */
+ vap->va_ctime.tv_sec != VNOVAL ||
+ vap->va_ctime.tv_nsec != VNOVAL ||
+ vap->va_birthtime.tv_sec != VNOVAL ||
+ vap->va_birthtime.tv_nsec != VNOVAL ||
+#endif
+ vap->va_gen != VNOVAL ||
+ vap->va_rdev != VNOVAL ||
+ vap->va_bytes != VNOVAL)
+ error = EINVAL;
+
+ DPRINTF(ATTR, ("setattr changing:\n"));
+ if (error == 0 && (vap->va_flags != VNOVAL)) {
+ DPRINTF(ATTR, ("\tchflags\n"));
+ return (EROFS);
+/* error = udf_chflags(vp, vap->va_flags, cred); */
+ }
+
+ if (error == 0 && (vap->va_size != VNOVAL)) {
+ DPRINTF(ATTR, ("\tchsize\n"));
+ if (vap->va_type == VDIR)
+ return (EISDIR);
+ if (vap->va_type == VLNK || vap->va_type == VREG)
+ return (EROFS);
+/* error = udf_chsize(vp, vap->va_size, cred); */
+ }
+
+ if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL)) {
+ DPRINTF(ATTR, ("\tchown\n"));
+ return (EROFS);
+/* error = udf_chown(vp, vap->va_uid, vap->va_gid, cred); */
+ }
+
+ if (error == 0 && (vap->va_mode != (mode_t)VNOVAL)) {
+ DPRINTF(ATTR, ("\tchmod\n"));
+ return (EROFS);
+/* error = udf_chmod(vp, vap->va_mode, cred); */
+ }
+
+ if (error == 0 &&
+ ((vap->va_atime.tv_sec != VNOVAL &&
+ vap->va_atime.tv_nsec != VNOVAL) ||
+ (vap->va_mtime.tv_sec != VNOVAL &&
+ vap->va_mtime.tv_nsec != VNOVAL))
+ ) {
+		DPRINTF(ATTR, ("\tchtimes\n"));
+ return (EROFS);
+/* error = udf_chtimes(vp, &vap->va_atime, &vap->va_mtime,
+ &vap->va_birthtime, vap->va_vaflags, cred); */
+ }
+/* VN_KNOTE(vp, NOTE_ATTRIB); */
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Return POSIX pathconf information for UDF file systems.
+ */
+static int
+udf_pathconf(struct vop_pathconf_args *ap)
+{
+ uint32_t bits;
+
+ DPRINTF(CALL, ("udf_pathconf called\n"));
+
+ switch (ap->a_name) {
+ case _PC_LINK_MAX:
+ *ap->a_retval = (1<<16)-1; /* 16 bits */
+ return (0);
+ case _PC_NAME_MAX:
+ *ap->a_retval = NAME_MAX;
+ return (0);
+ case _PC_PATH_MAX:
+ *ap->a_retval = PATH_MAX;
+ return (0);
+ case _PC_PIPE_BUF:
+ *ap->a_retval = PIPE_BUF;
+ return (0);
+ case _PC_CHOWN_RESTRICTED:
+ *ap->a_retval = 1;
+ return (0);
+ case _PC_NO_TRUNC:
+ *ap->a_retval = 1;
+ return (0);
+ case _PC_SYNC_IO:
+ *ap->a_retval = 0; /* synchronised is off for performance */
+ return (0);
+ case _PC_FILESIZEBITS:
+ /* 64 bit file offsets -> 2+floor(2log(2^64-1)) = 2 + 63 = 65 */
+ bits = 64; /* XXX ought to deliver 65 */
+
+ *ap->a_retval = bits;
+ return (0);
+ }
+
+ return (EINVAL);
+}
+
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_open(struct vop_open_args *ap)
+{
+ struct udf_node *udf_node = VTOI(ap->a_vp);
+ off_t file_size;
+ /* int flags; */
+
+ DPRINTF(CALL, ("udf_open called\n"));
+
+ /*
+ * Files marked append-only must be opened for appending.
+	 * TODO: get chflags(2) flags from extended attribute.
+ */
+ /* if ((flags & APPEND) && (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
+ return (EPERM); */
+
+ if (udf_node->fe)
+ file_size = le64toh(udf_node->fe->inf_len);
+ else
+ file_size = le64toh(udf_node->efe->inf_len);
+
+ vnode_create_vobject(ap->a_vp, file_size, ap->a_td);
+
+ return (0);
+}
+
+
+/* --------------------------------------------------------------------- */
+#if 0
+int
+udf_close(void *v)
+{
+ struct vop_close_args /* {
+ struct vnode *a_vp;
+ int a_fflag;
+ kauth_cred_t a_cred;
+ struct proc *a_p;
+ } */ *ap = v;
+ struct vnode *vp = ap->a_vp;
+ struct udf_node *udf_node = VTOI(vp);
+ int async = vp->v_mount->mnt_flag & MNT_ASYNC;
+ int error;
+
+ DPRINTF(CALL, ("udf_close called\n"));
+ udf_node = udf_node; /* shut up gcc */
+
+ if (!async && (vp->v_type != VDIR)) {
+ mutex_enter(&vp->v_interlock);
+ error = VOP_PUTPAGES(vp, 0, 0, PGO_CLEANIT);
+ if (error)
+ return (error);
+ }
+
+ mutex_enter(&vp->v_interlock);
+ if (vp->v_usecount > 1)
+ udf_itimes(udf_node, NULL, NULL, NULL);
+ mutex_exit(&vp->v_interlock);
+
+ return (0);
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+static int
+udf_access(struct vop_access_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct udf_node *udf_node = VTOI(vp);
+ accmode_t accmode = ap->a_accmode;
+ gid_t gid;
+ mode_t mode;
+ uid_t uid;
+ /* int flags = 0; */
+
+ DPRINTF(CALL, ("udf_access called\n"));
+
+ /* check if we are allowed to write */
+ switch (vp->v_type) {
+ case VDIR:
+ case VLNK:
+ case VREG:
+ /*
+ * normal nodes: check if we're on a read-only mounted
+ * filingsystem and bomb out if we're trying to write.
+ */
+ if ((accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY))
+ return (EROFS);
+ break;
+ case VBLK:
+ case VCHR:
+ case VSOCK:
+ case VFIFO:
+ /*
+ * special nodes: even on read-only mounted filingsystems
+ * these are allowed to be written to if permissions allow.
+ */
+ break;
+ default:
+ /* no idea what this is */
+ return (EINVAL);
+ }
+
+	/* no one may write immutable files */
+	/* TODO: get chflags(2) flags from extended attribute. */
+#if 0
+ if ((mode & VWRITE) && (flags & IMMUTABLE))
+ return (EPERM);
+#endif
+
+ mode = udf_getaccessmode(udf_node);
+
+ if (udf_node->fe) {
+ uid = udf_node->fe->uid;
+ gid = udf_node->fe->gid;
+ }
+ else {
+ uid = udf_node->efe->uid;
+ gid = udf_node->efe->gid;
+ }
+
+ return (vaccess(vp->v_type, mode, uid, gid, accmode, ap->a_cred, NULL));
+}
+
+/* --------------------------------------------------------------------- */
+#ifdef WRITE_SUPPORT
+int
+udf_create(void *v)
+{
+ struct vop_create_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ } */ *ap = v;
+ struct vnode *dvp = ap->a_dvp;
+ struct vnode **vpp = ap->a_vpp;
+ struct vattr *vap = ap->a_vap;
+ struct componentname *cnp = ap->a_cnp;
+ int error;
+
+ DPRINTF(CALL, ("udf_create called\n"));
+ error = udf_create_node(dvp, vpp, vap, cnp);
+
+ if (error || !(cnp->cn_flags & SAVESTART))
+ PNBUF_PUT(cnp->cn_pnbuf);
+ vput(dvp);
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_mknod(void *v)
+{
+ struct vop_mknod_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ } */ *ap = v;
+ struct vnode *dvp = ap->a_dvp;
+ struct vnode **vpp = ap->a_vpp;
+ struct vattr *vap = ap->a_vap;
+ struct componentname *cnp = ap->a_cnp;
+ int error;
+
+ DPRINTF(CALL, ("udf_mknod called\n"));
+ error = udf_create_node(dvp, vpp, vap, cnp);
+
+ if (error || !(cnp->cn_flags & SAVESTART))
+ PNBUF_PUT(cnp->cn_pnbuf);
+ vput(dvp);
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_mkdir(void *v)
+{
+ struct vop_mkdir_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ } */ *ap = v;
+ struct vnode *dvp = ap->a_dvp;
+ struct vnode **vpp = ap->a_vpp;
+ struct vattr *vap = ap->a_vap;
+ struct componentname *cnp = ap->a_cnp;
+ int error;
+
+ DPRINTF(CALL, ("udf_mkdir called\n"));
+ error = udf_create_node(dvp, vpp, vap, cnp);
+
+ if (error || !(cnp->cn_flags & SAVESTART))
+ PNBUF_PUT(cnp->cn_pnbuf);
+ vput(dvp);
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_do_link(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
+{
+ struct udf_node *udf_node, *dir_node;
+ struct vattr vap;
+ int error;
+
+ DPRINTF(CALL, ("udf_link called\n"));
+ error = 0;
+
+ /* some quick checks */
+ if (vp->v_type == VDIR)
+ return (EPERM); /* can't link a directory */
+ if (dvp->v_mount != vp->v_mount)
+ return (EXDEV); /* can't link across devices */
+ if (dvp == vp)
+ return (EPERM); /* can't be the same */
+
+ /* lock node */
+ error = vn_lock(vp, LK_EXCLUSIVE);
+ if (error)
+ return (error);
+
+ /* get attributes */
+ dir_node = VTOI(dvp);
+ udf_node = VTOI(vp);
+
+ error = VOP_GETATTR(vp, &vap, FSCRED);
+ if (error) {
+ VOP_UNLOCK(vp);
+ return (error);
+ }
+
+ /* check link count overflow */
+ if (vap.va_nlink >= (1<<16)-1) { /* uint16_t */
+ VOP_UNLOCK(vp);
+ return (EMLINK);
+ }
+
+ error = udf_dir_attach(dir_node->ump, dir_node, udf_node, &vap, cnp);
+ if (error)
+ VOP_UNLOCK(vp);
+ return (error);
+}
+
+int
+udf_link(void *v)
+{
+ struct vop_link_args /* {
+ struct vnode *a_dvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+ } */ *ap = v;
+ struct vnode *dvp = ap->a_dvp;
+ struct vnode *vp = ap->a_vp;
+ struct componentname *cnp = ap->a_cnp;
+ int error;
+
+ error = udf_do_link(dvp, vp, cnp);
+ if (error)
+ VOP_ABORTOP(dvp, cnp);
+
+ VN_KNOTE(vp, NOTE_LINK);
+ VN_KNOTE(dvp, NOTE_WRITE);
+ vput(dvp);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int
+udf_do_symlink(struct udf_node *udf_node, char *target)
+{
+ struct pathcomp pathcomp;
+ uint8_t *pathbuf, *pathpos, *compnamepos;
+ char *mntonname;
+ int pathlen, len, compnamelen, mntonnamelen;
+ int error;
+
+ /* process `target' to an UDF structure */
+ pathbuf = malloc(UDF_SYMLINKBUFLEN, M_UDFTEMP, M_WAITOK);
+ pathpos = pathbuf;
+ pathlen = 0;
+
+ if (*target == '/') {
+ /* symlink starts from the root */
+ len = UDF_PATH_COMP_SIZE;
+ memset(&pathcomp, 0, len);
+ pathcomp.type = UDF_PATH_COMP_ROOT;
+
+ /* check if its mount-point relative! */
+ mntonname = udf_node->ump->vfs_mountp->mnt_stat.f_mntonname;
+ mntonnamelen = strlen(mntonname);
+ if (strlen(target) >= mntonnamelen) {
+ if (strncmp(target, mntonname, mntonnamelen) == 0) {
+ pathcomp.type = UDF_PATH_COMP_MOUNTROOT;
+ target += mntonnamelen;
+ }
+ } else {
+ target++;
+ }
+
+ memcpy(pathpos, &pathcomp, len);
+ pathpos += len;
+ pathlen += len;
+ }
+
+ error = 0;
+ while (*target) {
+ /* ignore multiple '/' */
+ while (*target == '/') {
+ target++;
+ }
+ if (!*target)
+ break;
+
+ /* extract component name */
+ compnamelen = 0;
+ compnamepos = target;
+ while ((*target) && (*target != '/')) {
+ target++;
+ compnamelen++;
+ }
+
+ /* just trunc if too long ?? (security issue) */
+ if (compnamelen >= 127) {
+ error = ENAMETOOLONG;
+ break;
+ }
+
+ /* convert unix name to UDF name */
+ len = sizeof(struct pathcomp);
+ memset(&pathcomp, 0, len);
+ pathcomp.type = UDF_PATH_COMP_NAME;
+ len = UDF_PATH_COMP_SIZE;
+
+ if ((compnamelen == 2) && (strncmp(compnamepos, "..", 2) == 0))
+ pathcomp.type = UDF_PATH_COMP_PARENTDIR;
+ if ((compnamelen == 1) && (*compnamepos == '.'))
+ pathcomp.type = UDF_PATH_COMP_CURDIR;
+
+ if (pathcomp.type == UDF_PATH_COMP_NAME) {
+ unix_to_udf_name(
+ (char *) &pathcomp.ident, &pathcomp.l_ci,
+ compnamepos, compnamelen,
+ &udf_node->ump->logical_vol->desc_charset);
+ len = UDF_PATH_COMP_SIZE + pathcomp.l_ci;
+ }
+
+ if (pathlen + len >= UDF_SYMLINKBUFLEN) {
+ error = ENAMETOOLONG;
+ break;
+ }
+
+ memcpy(pathpos, &pathcomp, len);
+ pathpos += len;
+ pathlen += len;
+ }
+
+ if (error) {
+		/* apparently too big */
+ free(pathbuf, M_UDFTEMP);
+ return (error);
+ }
+
+ error = udf_grow_node(udf_node, pathlen);
+ if (error) {
+ /* failed to pregrow node */
+ free(pathbuf, M_UDFTEMP);
+ return (error);
+ }
+
+ /* write out structure on the new file */
+ error = vn_rdwr(UIO_WRITE, udf_node->vnode,
+ pathbuf, pathlen, 0,
+ UIO_SYSSPACE, IO_NODELOCKED | IO_ALTSEMANTICS,
+ FSCRED, NULL, NULL);
+
+ /* return status of symlink contents writeout */
+ free(pathbuf, M_UDFTEMP);
+ return (error);
+}
+
+
+int
+udf_symlink(void *v)
+{
+ struct vop_symlink_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ char *a_target;
+ } */ *ap = v;
+ struct vnode *dvp = ap->a_dvp;
+ struct vnode **vpp = ap->a_vpp;
+ struct vattr *vap = ap->a_vap;
+ struct componentname *cnp = ap->a_cnp;
+ struct udf_node *dir_node;
+ struct udf_node *udf_node;
+ int error;
+
+ error = udf_create_node(dvp, vpp, vap, cnp);
+ KASSERT(((error == 0) && (*vpp != NULL)) || ((error && (*vpp == NULL))));
+ if (!error) {
+ dir_node = VTOI(dvp);
+ udf_node = VTOI(*vpp);
+ KASSERT(udf_node);
+ error = udf_do_symlink(udf_node, ap->a_target);
+ if (error) {
+ /* remove node */
+ udf_shrink_node(udf_node, 0);
+ udf_dir_detach(udf_node->ump, dir_node, udf_node, cnp);
+ }
+ }
+ if (error || !(cnp->cn_flags & SAVESTART))
+ PNBUF_PUT(cnp->cn_pnbuf);
+ vput(dvp);
+ return (error);
+}
+#endif
+/* --------------------------------------------------------------------- */
+
+int
+udf_readlink(struct vop_readlink_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct uio *uio = ap->a_uio;
+ struct pathcomp pathcomp;
+ struct udf_node *udf_node = VTOI(vp);
+ int pathlen, targetlen, namelen, mntonnamelen, len, l_ci, filelen;
+ int first, error;
+ char *mntonname;
+ uint8_t *pathbuf, *targetbuf, *tmpname;
+ uint8_t *pathpos, *targetpos;
+
+ DPRINTF(CALL, ("udf_readlink called\n"));
+
+ if (udf_node->efe)
+ filelen = le64toh(udf_node->efe->inf_len);
+ else
+ filelen = le64toh(udf_node->fe->inf_len);
+
+ /* claim temporary buffers for translation */
+ pathbuf = malloc(UDF_SYMLINKBUFLEN, M_UDFTEMP, M_WAITOK);
+ targetbuf = malloc(PATH_MAX+1, M_UDFTEMP, M_WAITOK);
+ tmpname = malloc(PATH_MAX+1, M_UDFTEMP, M_WAITOK);
+ memset(pathbuf, 0, UDF_SYMLINKBUFLEN);
+ memset(targetbuf, 0, PATH_MAX);
+
+
+ /* read contents of file in our temporary buffer */
+ error = vn_rdwr(UIO_READ, vp,
+ pathbuf, filelen, 0,
+ UIO_SYSSPACE, IO_NODELOCKED,
+ FSCRED, NULL, NULL, curthread);
+ if (error) {
+ /* failed to read in symlink contents */
+ free(pathbuf, M_UDFTEMP);
+ free(targetbuf, M_UDFTEMP);
+ free(tmpname, M_UDFTEMP);
+ return (error);
+ }
+
+ /* convert to a unix path */
+ pathpos = pathbuf;
+ pathlen = 0;
+ targetpos = targetbuf;
+ targetlen = PATH_MAX;
+ mntonname = udf_node->ump->vfs_mountp->mnt_stat.f_mntonname;
+ mntonnamelen = strlen(mntonname);
+
+ error = 0;
+ first = 1;
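+	/*
+	 * A UDF symlink body is a sequence of path component records; walk
+	 * them and rebuild a unix path in targetbuf.
+	 */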
+ while (filelen - pathlen >= UDF_PATH_COMP_SIZE) {
+ len = UDF_PATH_COMP_SIZE;
+ memcpy(&pathcomp, pathpos, len);
+ l_ci = pathcomp.l_ci;
+ switch (pathcomp.type) {
+ case UDF_PATH_COMP_ROOT :
+ /* XXX should check for l_ci; bugcompatible now */
+ if ((targetlen < 1) || !first) {
+ error = EINVAL;
+ break;
+ }
+ *targetpos++ = '/'; targetlen--;
+ break;
+ case UDF_PATH_COMP_MOUNTROOT :
+ /* XXX what should it be if l_ci > 0 ? [4/48.16.1.2] */
+ if (l_ci || (targetlen < mntonnamelen+1) || !first) {
+ error = EINVAL;
+ break;
+ }
+ memcpy(targetpos, mntonname, mntonnamelen);
+ targetpos += mntonnamelen; targetlen -= mntonnamelen;
+ if (filelen-pathlen > UDF_PATH_COMP_SIZE+l_ci) {
+ /* more follows, so must be directory */
+ *targetpos++ = '/'; targetlen--;
+ }
+ break;
+ case UDF_PATH_COMP_PARENTDIR :
+ /* XXX should check for l_ci; bugcompatible now */
+ if (targetlen < 3) {
+ error = EINVAL;
+ break;
+ }
+ *targetpos++ = '.'; targetlen--;
+ *targetpos++ = '.'; targetlen--;
+ *targetpos++ = '/'; targetlen--;
+ break;
+ case UDF_PATH_COMP_CURDIR :
+ /* XXX should check for l_ci; bugcompatible now */
+ if (targetlen < 2) {
+ error = EINVAL;
+ break;
+ }
+ *targetpos++ = '.'; targetlen--;
+ *targetpos++ = '/'; targetlen--;
+ break;
+ case UDF_PATH_COMP_NAME :
+ if (l_ci == 0) {
+ error = EINVAL;
+ break;
+ }
+ memset(tmpname, 0, PATH_MAX);
+ memcpy(&pathcomp, pathpos, len + l_ci);
+ udf_to_unix_name(udf_node->ump, tmpname, MAXPATHLEN,
+ pathcomp.ident, l_ci);
+ namelen = strlen(tmpname);
+ if (targetlen < namelen + 1) {
+ error = EINVAL;
+ break;
+ }
+ memcpy(targetpos, tmpname, namelen);
+ targetpos += namelen; targetlen -= namelen;
+ if (filelen-pathlen > UDF_PATH_COMP_SIZE+l_ci) {
+ /* more follows, so must be directory */
+ *targetpos++ = '/'; targetlen--;
+ }
+ break;
+ default :
+ error = EINVAL;
+ break;
+ }
+ first = 0;
+ if (error)
+ break;
+ pathpos += UDF_PATH_COMP_SIZE + l_ci;
+ pathlen += UDF_PATH_COMP_SIZE + l_ci;
+
+ }
+ /* all processed? */
+ if (filelen - pathlen > 0)
+ error = EINVAL;
+
+ /* uiomove() to destination */
+ if (!error)
+ uiomove(targetbuf, PATH_MAX - targetlen, uio);
+
+ free(pathbuf, M_UDFTEMP);
+ free(targetbuf, M_UDFTEMP);
+ free(tmpname, M_UDFTEMP);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Check if source directory is in the path of the target directory. Target
+ * is supplied locked, source is unlocked. The target is always vput before
+ * returning. Modeled after UFS.
+ *
+ * If source is on the path from target to the root, return error.
+ */
+#ifdef WRITE_SUPPORT
+static int
+udf_on_rootpath(struct udf_node *source, struct udf_node *target)
+{
+ struct udf_mount *ump = target->ump;
+ struct udf_node *res_node;
+ struct long_ad icb_loc, *root_icb;
+ const char *name;
+ int namelen;
+ int error, found;
+
+ name = "..";
+ namelen = 2;
+ error = 0;
+ res_node = target;
+
+ root_icb = &ump->fileset_desc->rootdir_icb;
+
+ /* if nodes are equal, it is no use looking */
+ if (udf_compare_icb(&source->loc, &target->loc) == 0) {
+ error = EEXIST;
+ goto out;
+ }
+
+ /* nothing can exist before the root */
+ if (udf_compare_icb(root_icb, &target->loc) == 0) {
+ error = 0;
+ goto out;
+ }
+
+ for (;;) {
+ DPRINTF(NODE, ("udf_on_rootpath : "
+ "source vp %p, looking at vp %p\n",
+ source->vnode, res_node->vnode));
+
+ /* sanity check */
+ if (res_node->vnode->v_type != VDIR) {
+ error = ENOTDIR;
+ goto out;
+ }
+
+ /* go down one level */
+ error = udf_lookup_name_in_dir(res_node->vnode, name, namelen,
+ &icb_loc, &found);
+ DPRINTF(NODE, ("\tlookup of '..' resulted in error %d, "
+ "found %d\n", error, found));
+
+ if (!found)
+ error = ENOENT;
+ if (error)
+ goto out;
+
+ /* did we encounter source node? */
+ if (udf_compare_icb(&icb_loc, &source->loc) == 0) {
+ error = EINVAL;
+ goto out;
+ }
+
+ /* did we encounter the root node? */
+ if (udf_compare_icb(&icb_loc, root_icb) == 0) {
+ error = 0;
+ goto out;
+ }
+
+		/* put our intermediate node; we're done with it */
+ /* DPRINTF(NODE, ("\tvput %p\n", target->vnode)); */
+ vput(res_node->vnode);
+
+ DPRINTF(NODE, ("\tgetting the .. node\n"));
+ error = udf_get_node(ump, &icb_loc, &res_node);
+
+ if (error) { /* argh, bail out */
+ KASSERT(res_node == NULL);
+ // res_node = NULL;
+ goto out;
+ }
+ }
+out:
+ DPRINTF(NODE, ("\tresult: %svalid, error = %d\n", error?"in":"", error));
+
+ /* put our last node */
+ if (res_node)
+ vput(res_node->vnode);
+
+ return (error);
+}
+
+/* note: I tried to follow the logic of the tmpfs rename code */
+int
+udf_rename(void *v)
+{
+ struct vop_rename_args /* {
+ struct vnode *a_fdvp;
+ struct vnode *a_fvp;
+ struct componentname *a_fcnp;
+ struct vnode *a_tdvp;
+ struct vnode *a_tvp;
+ struct componentname *a_tcnp;
+ } */ *ap = v;
+ struct vnode *tvp = ap->a_tvp;
+ struct vnode *tdvp = ap->a_tdvp;
+ struct vnode *fvp = ap->a_fvp;
+ struct vnode *fdvp = ap->a_fdvp;
+ struct componentname *tcnp = ap->a_tcnp;
+ struct componentname *fcnp = ap->a_fcnp;
+ struct udf_node *fnode, *fdnode, *tnode, *tdnode;
+ struct vattr fvap, tvap;
+ int error;
+
+ DPRINTF(CALL, ("udf_rename called\n"));
+
+ /* disallow cross-device renames */
+ if (fvp->v_mount != tdvp->v_mount ||
+ (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
+ error = EXDEV;
+ goto out_unlocked;
+ }
+
+ fnode = VTOI(fvp);
+ fdnode = VTOI(fdvp);
+ tnode = (tvp == NULL) ? NULL : VTOI(tvp);
+ tdnode = VTOI(tdvp);
+
+ /* lock our source dir */
+ if (fdnode != tdnode) {
+ error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY);
+ if (error != 0)
+ goto out_unlocked;
+ }
+
+ /* get info about the node to be moved */
+ error = VOP_GETATTR(fvp, &fvap, FSCRED);
+ KASSERT(error == 0);
+
+ /* check when to delete the old already existing entry */
+ if (tvp) {
+ /* get info about the node to be moved to */
+		error = VOP_GETATTR(tvp, &tvap, FSCRED);
+ KASSERT(error == 0);
+
+ /* if both dirs, make sure the destination is empty */
+ if (fvp->v_type == VDIR && tvp->v_type == VDIR) {
+ if (tvap.va_nlink > 2) {
+ error = ENOTEMPTY;
+ goto out;
+ }
+ }
+ /* if moving dir, make sure destination is dir too */
+ if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
+ error = ENOTDIR;
+ goto out;
+ }
+ /* if we're moving a non-directory, make sure dest is no dir */
+ if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
+ error = EISDIR;
+ goto out;
+ }
+ }
+
+ /* check if moving a directory to a new parent is allowed */
+ if ((fdnode != tdnode) && (fvp->v_type == VDIR)) {
+ /* release tvp since we might encounter it and lock up */
+ if (tvp)
+ vput(tvp);
+
+ /* vref tdvp since we lose its ref in udf_on_rootpath */
+ vref(tdvp);
+
+ /* search if fnode is a component of tdnode's path to root */
+ error = udf_on_rootpath(fnode, tdnode);
+
+ DPRINTF(NODE, ("Dir rename allowed ? %s\n", error ? "NO":"YES"));
+
+ if (error) {
+ /* compensate for our vref earlier */
+ vrele(tdvp);
+ goto out;
+ }
+
+		/* relock tdvp; it's still here due to the vref earlier */
+ vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY);
+
+ /*
+		 * Re-lookup tvp since the parent has been unlocked, so it could
+		 * have changed or been removed in the meantime.
+ */
+ tcnp->cn_flags &= ~SAVESTART;
+ error = relookup(tdvp, &tvp, tcnp);
+ if (error) {
+ vput(tdvp);
+ goto out;
+ }
+ tnode = (tvp == NULL) ? NULL : VTOI(tvp);
+ }
+
+ /* remove existing entry if present */
+ if (tvp)
+ udf_dir_detach(tdnode->ump, tdnode, tnode, tcnp);
+
+ /* create new directory entry for the node */
+ error = udf_dir_attach(tdnode->ump, tdnode, fnode, &fvap, tcnp);
+ if (error)
+ goto out;
+
+ /* unlink old directory entry for the node, if failing, unattach new */
+ error = udf_dir_detach(tdnode->ump, fdnode, fnode, fcnp);
+ if (error)
+ udf_dir_detach(tdnode->ump, tdnode, fnode, tcnp);
+ if (error)
+ goto out;
+
+ /* update tnode's '..' if moving directory to new parent */
+ if ((fdnode != tdnode) && (fvp->v_type == VDIR)) {
+ /* update fnode's '..' entry */
+ error = udf_dir_update_rootentry(fnode->ump, fnode, tdnode);
+ if (error) {
+ /* 'try' to recover from this situation */
+ udf_dir_attach(tdnode->ump, fdnode, fnode, &fvap, fcnp);
+ udf_dir_detach(tdnode->ump, tdnode, fnode, tcnp);
+ }
+ }
+
+out:
+ if (fdnode != tdnode)
+ VOP_UNLOCK(fdvp);
+
+out_unlocked:
+ VOP_ABORTOP(tdvp, tcnp);
+ if (tdvp == tvp)
+ vrele(tdvp);
+ else
+ vput(tdvp);
+ if (tvp)
+ vput(tvp);
+ VOP_ABORTOP(fdvp, fcnp);
+
+ /* release source nodes. */
+ vrele(fdvp);
+ vrele(fvp);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_remove(void *v)
+{
+ struct vop_remove_args /* {
+ struct vnode *a_dvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+ } */ *ap = v;
+ struct vnode *dvp = ap->a_dvp;
+ struct vnode *vp = ap->a_vp;
+ struct componentname *cnp = ap->a_cnp;
+ struct udf_node *dir_node = VTOI(dvp);
+ struct udf_node *udf_node = VTOI(vp);
+ struct udf_mount *ump = dir_node->ump;
+ int error;
+
+ DPRINTF(CALL, ("udf_remove called\n"));
+ if (vp->v_type != VDIR) {
+ error = udf_dir_detach(ump, dir_node, udf_node, cnp);
+ DPRINTFIF(NODE, error, ("\tgot error removing file\n"));
+ } else {
+ DPRINTF(NODE, ("\tis a directory: perm. denied\n"));
+ error = EPERM;
+ }
+
+ if (error == 0) {
+ VN_KNOTE(vp, NOTE_DELETE);
+ VN_KNOTE(dvp, NOTE_WRITE);
+ }
+
+ if (dvp == vp)
+ vrele(vp);
+ else
+ vput(vp);
+ vput(dvp);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_rmdir(void *v)
+{
+ struct vop_rmdir_args /* {
+ struct vnode *a_dvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+ } */ *ap = v;
+ struct vnode *vp = ap->a_vp;
+ struct vnode *dvp = ap->a_dvp;
+ struct componentname *cnp = ap->a_cnp;
+ struct udf_node *dir_node = VTOI(dvp);
+ struct udf_node *udf_node = VTOI(vp);
+ struct udf_mount *ump = dir_node->ump;
+ int refcnt, error;
+
+ DPRINTF(NOTIMPL, ("udf_rmdir called\n"));
+
+ /* don't allow '.' to be deleted */
+ if (dir_node == udf_node) {
+ vrele(dvp);
+ vput(vp);
+ return (EINVAL);
+ }
+
+ /* check to see if the directory is empty */
+ error = 0;
+ if (dir_node->fe) {
+ refcnt = le16toh(udf_node->fe->link_cnt);
+ } else {
+ refcnt = le16toh(udf_node->efe->link_cnt);
+ }
+ if (refcnt > 1) {
+ /* NOT empty */
+ vput(dvp);
+ vput(vp);
+ return (ENOTEMPTY);
+ }
+
+ /* detach the node from the directory */
+ error = udf_dir_detach(ump, dir_node, udf_node, cnp);
+ if (error == 0) {
+ cache_purge(vp);
+// cache_purge(dvp); /* XXX from msdosfs, why? */
+ VN_KNOTE(vp, NOTE_DELETE);
+ }
+ DPRINTFIF(NODE, error, ("\tgot error removing file\n"));
+
+	/* put the nodes and exit */
+ vput(dvp);
+ vput(vp);
+
+ return (error);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_fsync(void *v)
+{
+ struct vop_fsync_args /* {
+ struct vnode *a_vp;
+ kauth_cred_t a_cred;
+ int a_flags;
+ off_t offlo;
+ off_t offhi;
+ struct proc *a_p;
+ } */ *ap = v;
+ struct vnode *vp = ap->a_vp;
+ struct udf_node *udf_node = VTOI(vp);
+ int error, flags, wait;
+
+ DPRINTF(SYNC, ("udf_fsync called on %p : %s, %s\n",
+ udf_node,
+ (ap->a_flags & FSYNC_WAIT) ? "wait":"no wait",
+ (ap->a_flags & FSYNC_DATAONLY) ? "data_only":"complete"));
+
+ /* flush data and wait for it when requested */
+ wait = (ap->a_flags & FSYNC_WAIT) ? UPDATE_WAIT : 0;
+ vflushbuf(vp, wait);
+
+ if (udf_node == NULL) {
+ printf("udf_fsync() called on NULL udf_node!\n");
+ return (0);
+ }
+ if (vp->v_tag != VT_UDF) {
+ printf("udf_fsync() called on node not tagged as UDF node!\n");
+ return (0);
+ }
+
+ /* set our times */
+ udf_itimes(udf_node, NULL, NULL, NULL);
+
+ /* if called when mounted readonly, never write back */
+ if (vp->v_mount->mnt_flag & MNT_RDONLY)
+ return (0);
+
+ /* if only data is requested, return */
+ if (ap->a_flags & FSYNC_DATAONLY)
+ return (0);
+
+	/* check if the node is dirty 'enough' */
+ flags = udf_node->i_flags & (IN_MODIFIED | IN_ACCESSED);
+ if (flags == 0)
+ return (0);
+
+ /* if we don't have to wait, check for IO pending */
+ if (!wait) {
+ if (vp->v_numoutput > 0) {
+ DPRINTF(SYNC, ("udf_fsync %p, rejecting on v_numoutput\n", udf_node));
+ return (0);
+ }
+ if (udf_node->outstanding_bufs > 0) {
+ DPRINTF(SYNC, ("udf_fsync %p, rejecting on outstanding_bufs\n", udf_node));
+ return (0);
+ }
+ if (udf_node->outstanding_nodedscr > 0) {
+ DPRINTF(SYNC, ("udf_fsync %p, rejecting on outstanding_nodedscr\n", udf_node));
+ return (0);
+ }
+ }
+
+ /* wait until vp->v_numoutput reaches zero i.e. is finished */
+ if (wait) {
+ DPRINTF(SYNC, ("udf_fsync %p, waiting\n", udf_node));
+ mutex_enter(&vp->v_interlock);
+ while (vp->v_numoutput) {
+ DPRINTF(SYNC, ("udf_fsync %p, v_numoutput %d\n", udf_node, vp->v_numoutput));
+ cv_timedwait(&vp->v_cv, &vp->v_interlock, hz/8);
+ }
+ mutex_exit(&vp->v_interlock);
+ DPRINTF(SYNC, ("udf_fsync %p, fin wait\n", udf_node));
+ }
+
+ /* write out node and wait for it if requested */
+ DPRINTF(SYNC, ("udf_fsync %p, writeout node\n", udf_node));
+ error = udf_writeout_node(udf_node, wait);
+ if (error)
+ return (error);
+
+ /* TODO/XXX if ap->a_flags & FSYNC_CACHE, we ought to do a disc sync */
+
+ return (0);
+}
+
+/* --------------------------------------------------------------------- */
+
+int
+udf_advlock(void *v)
+{
+ struct vop_advlock_args /* {
+ struct vnode *a_vp;
+ void *a_id;
+ int a_op;
+ struct flock *a_fl;
+ int a_flags;
+ } */ *ap = v;
+ struct vnode *vp = ap->a_vp;
+ struct udf_node *udf_node = VTOI(vp);
+ struct file_entry *fe;
+ struct extfile_entry *efe;
+ uint64_t file_size;
+
+ DPRINTF(LOCKING, ("udf_advlock called\n"));
+
+ /* get directory filesize */
+ if (udf_node->fe) {
+ fe = udf_node->fe;
+ file_size = le64toh(fe->inf_len);
+ } else {
+ assert(udf_node->efe);
+ efe = udf_node->efe;
+ file_size = le64toh(efe->inf_len);
+ }
+
+ return (lf_advlock(ap, &udf_node->lockf, file_size));
+}
+#endif
+
+/*
+ * File specific ioctls.
+ */
+static int
+udf_ioctl(struct vop_ioctl_args *ap)
+{
+ printf("%s called\n", __func__);
+ return (ENOTTY);
+}
+
+static int
+udf_print(struct vop_print_args *ap)
+{
+ struct vnode *vp = ap->a_vp;
+ struct udf_node *udf_node = VTOI(vp);
+
+ printf(" ino %u, on dev %s", (uint32_t)udf_node->hash_id,
+ devtoname(udf_node->ump->dev));
+ if (vp->v_type == VFIFO)
+ fifo_printinfo(vp);
+ printf("\n");
+ return (0);
+}
+
+static int
+udf_vptofh(struct vop_vptofh_args *ap)
+{
+ struct udf_node *udf_node = VTOI(ap->a_vp);
+ struct udf_fid *ufid = (struct udf_fid *)ap->a_fhp;
+
+ ufid->len = sizeof(struct udf_fid);
+ ufid->ino = udf_node->hash_id;
+
+ return (0);
+}
+
Index: sys/fs/udf2/udfio.h
===================================================================
--- /dev/null
+++ sys/fs/udf2/udfio.h
@@ -0,0 +1,198 @@
+
+/* Shared between kernel & process */
+
+#ifndef _SYS_UDFIO_H_
+#define _SYS_UDFIO_H_
+
+#ifndef _KERNEL
+#include <sys/types.h>
+#endif
+#include <sys/ioccom.h>
+
+struct udf_session_info {
+ uint32_t session_num;
+
+ uint16_t sector_size;
+ uint16_t num_sessions;
+ uint32_t session_start_addr;
+ uint32_t session_end_addr;
+
+ uint16_t num_tracks;
+ uint8_t first_track;
+ uint16_t session_first_track;
+ uint16_t session_last_track;
+};
+#define UDFIOTEST _IOWR('c', 300, struct udf_session_info)
+
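[Editorial illustration, not part of the patch.] A minimal userland sketch of how the
UDFIOTEST ioctl declared above might be issued, assuming the kernel attaches the handler
to the optical device node; the /dev/cd0 path, the include path for "udfio.h", and the
meaning of session_num = 0 are assumptions, not taken from this change.

	#include <sys/types.h>
	#include <sys/ioctl.h>

	#include <err.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	#include "udfio.h"	/* assumed include path */

	int
	main(void)
	{
		struct udf_session_info usi = { .session_num = 0 };	/* session to query; semantics assumed */
		int fd;

		if ((fd = open("/dev/cd0", O_RDONLY)) == -1)	/* placeholder device */
			err(1, "open");
		if (ioctl(fd, UDFIOTEST, &usi) == -1)
			err(1, "ioctl(UDFIOTEST)");
		printf("sessions %u, sector size %u, session %u spans %u-%u\n",
		    usi.num_sessions, usi.sector_size, usi.session_num,
		    usi.session_start_addr, usi.session_end_addr);
		close(fd);
		return (0);
	}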
+#if defined(_KERNEL) || defined(_EXPOSE_MMC)
+/* not exposed to userland yet until it is completely mature */
+/*
+ * MMC device abstraction interface.
+ *
+ * It gathers information from GET_CONFIGURATION, READ_DISCINFO,
+ * READ_TRACKINFO, READ_TOC2, READ_CD_CAPACITY and GET_CONFIGURATION
+ * SCSI/ATAPI calls, regardless of whether it is a legacy CD-ROM/DVD-ROM
+ * device or an MMC standard recordable device.
+ */
+struct mmc_discinfo {
+ uint16_t mmc_profile;
+ uint16_t mmc_class;
+
+ uint8_t disc_state;
+ uint8_t last_session_state;
+ uint8_t bg_format_state;
+ uint8_t link_block_penalty; /* in sectors */
+
+ uint64_t mmc_cur; /* current MMC_CAPs */
+ uint64_t mmc_cap; /* possible MMC_CAPs */
+
+ uint32_t disc_flags; /* misc flags */
+
+ uint32_t disc_id;
+ uint64_t disc_barcode;
+ uint8_t application_code; /* 8 bit really */
+
+ uint8_t unused1[3]; /* padding */
+
+ uint32_t last_possible_lba; /* last leadout start adr. */
+ uint32_t sector_size;
+
+ uint16_t num_sessions;
+ uint16_t num_tracks; /* derived */
+
+ uint16_t first_track;
+ uint16_t first_track_last_session;
+ uint16_t last_track_last_session;
+
+ uint16_t unused2; /* padding/misc info resv. */
+
+ uint16_t reserved1[4]; /* MMC-5 track resources */
+ uint32_t reserved2[3]; /* MMC-5 POW resources */
+
+ uint32_t reserved3[8]; /* MMC-5+ */
+};
+#define FKIOCTL 0x80000000 /* kernel original ioctl */
+#define MMCGETDISCINFO _IOR('c', 28, struct mmc_discinfo)
+
+#define MMC_CLASS_UNKN 0
+#define MMC_CLASS_DISC 1
+#define MMC_CLASS_CD 2
+#define MMC_CLASS_DVD 3
+#define MMC_CLASS_MO 4
+#define MMC_CLASS_BD 5
+#define MMC_CLASS_FILE 0xffff /* emulation mode */
+
+#define MMC_DFLAGS_BARCODEVALID (1 << 0) /* barcode is present and valid */
+#define MMC_DFLAGS_DISCIDVALID (1 << 1) /* discid is present and valid */
+#define MMC_DFLAGS_APPCODEVALID (1 << 2) /* application code valid */
+#define MMC_DFLAGS_UNRESTRICTED (1 << 3) /* restricted, then set app. code */
+
+#define MMC_DFLAGS_FLAGBITS \
+ "\10\1BARCODEVALID\2DISCIDVALID\3APPCODEVALID\4UNRESTRICTED"
+
+#define MMC_CAP_SEQUENTIAL (1 << 0) /* sequential writable only */
+#define MMC_CAP_RECORDABLE (1 << 1) /* record-able; i.e. not static */
+#define MMC_CAP_ERASABLE (1 << 2) /* drive can erase sectors */
+#define MMC_CAP_BLANKABLE (1 << 3) /* media can be blanked */
+#define MMC_CAP_FORMATTABLE (1 << 4) /* media can be formatted */
+#define MMC_CAP_REWRITABLE (1 << 5) /* media can be rewritten */
+#define MMC_CAP_MRW (1 << 6) /* Mount Rainier formatted */
+#define MMC_CAP_PACKET (1 << 7) /* using packet recording */
+#define MMC_CAP_STRICTOVERWRITE (1 << 8) /* only writes a packet at a time */
+#define MMC_CAP_PSEUDOOVERWRITE (1 << 9) /* overwrite through replacement */
+#define MMC_CAP_ZEROLINKBLK (1 << 10) /* zero link block length capable */
+#define MMC_CAP_HW_DEFECTFREE (1 << 11) /* hardware defect management */
+
+#define MMC_CAP_FLAGBITS \
+ "\10\1SEQUENTIAL\2RECORDABLE\3ERASABLE\4BLANKABLE\5FORMATTABLE" \
+ "\6REWRITABLE\7MRW\10PACKET\11STRICTOVERWRITE\12PSEUDOOVERWRITE" \
+ "\13ZEROLINKBLK\14HW_DEFECTFREE"
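[Editorial illustration, not part of the patch.] A small sketch, assuming this header is
included, of how a consumer might test the capability bits above on a filled-in
struct mmc_discinfo; the helper name is hypothetical.

	/* True when the current capabilities allow erasing or rewriting sectors. */
	static inline int
	mmc_disc_is_rewritable(const struct mmc_discinfo *di)
	{

		return ((di->mmc_cur & (MMC_CAP_REWRITABLE | MMC_CAP_ERASABLE)) != 0);
	}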
+
+#define MMC_STATE_EMPTY 0
+#define MMC_STATE_INCOMPLETE 1
+#define MMC_STATE_FULL 2
+#define MMC_STATE_CLOSED 3
+
+#define MMC_BGFSTATE_UNFORM 0
+#define MMC_BGFSTATE_STOPPED 1
+#define MMC_BGFSTATE_RUNNING 2
+#define MMC_BGFSTATE_COMPLETED 3
+
+
+struct mmc_trackinfo {
+ uint16_t tracknr; /* IN/OUT */
+ uint16_t sessionnr;
+
+ uint8_t track_mode;
+ uint8_t data_mode;
+
+ uint16_t flags;
+
+ uint32_t track_start;
+ uint32_t next_writable;
+ uint32_t free_blocks;
+ uint32_t packet_size;
+ uint32_t track_size;
+ uint32_t last_recorded;
+};
+#define MMCGETTRACKINFO _IOWR('c', 29, struct mmc_trackinfo)
+
+#define MMC_TRACKINFO_COPY (1 << 0)
+#define MMC_TRACKINFO_DAMAGED (1 << 1)
+#define MMC_TRACKINFO_FIXED_PACKET (1 << 2)
+#define MMC_TRACKINFO_INCREMENTAL (1 << 3)
+#define MMC_TRACKINFO_BLANK (1 << 4)
+#define MMC_TRACKINFO_RESERVED (1 << 5)
+#define MMC_TRACKINFO_NWA_VALID (1 << 6)
+#define MMC_TRACKINFO_LRA_VALID (1 << 7)
+#define MMC_TRACKINFO_DATA (1 << 8)
+#define MMC_TRACKINFO_AUDIO (1 << 9)
+#define MMC_TRACKINFO_AUDIO_4CHAN (1 << 10)
+#define MMC_TRACKINFO_PRE_EMPH (1 << 11)
+
+#define MMC_TRACKINFO_FLAGBITS \
+ "\10\1COPY\2DAMAGED\3FIXEDPACKET\4INCREMENTAL\5BLANK" \
+ "\6RESERVED\7NWA_VALID\10LRA_VALID\11DATA\12AUDIO" \
+ "\13AUDIO_4CHAN\14PRE_EMPH"
+
+struct mmc_op {
+ uint16_t operation; /* IN */
+ uint16_t mmc_profile; /* IN */
+
+ /* parameters to operation */
+ uint16_t tracknr; /* IN */
+ uint16_t sessionnr; /* IN */
+ uint32_t extent; /* IN */
+
+ uint32_t reserved[4];
+};
+#define MMCOP _IOWR('c', 30, struct mmc_op)
+
+#define MMC_OP_SYNCHRONISECACHE 1
+#define MMC_OP_CLOSETRACK 2
+#define MMC_OP_CLOSESESSION 3
+#define MMC_OP_FINALISEDISC 4
+#define MMC_OP_RESERVETRACK 5
+#define MMC_OP_RESERVETRACK_NWA 6
+#define MMC_OP_UNRESERVETRACK 7
+#define MMC_OP_REPAIRTRACK 8
+#define MMC_OP_UNCLOSELASTSESSION 9
+#define MMC_OP_MAX 9
+
+struct mmc_writeparams {
+ uint16_t tracknr; /* IN */
+ uint16_t mmc_class; /* IN */
+ uint32_t mmc_cur; /* IN */
+ uint32_t blockingnr; /* IN */
+
+ /* when tracknr == 0 */
+ uint8_t track_mode; /* IN; normally 5 */
+ uint8_t data_mode; /* IN; normally 2 */
+};
+#define MMC_TRACKMODE_DEFAULT 5 /* data, incremental recording */
+#define MMC_DATAMODE_DEFAULT 2 /* CDROM XA disc */
+#define MMCSETUPWRITEPARAMS _IOW('c', 31, struct mmc_writeparams)
+
+#endif /* _KERNEL || _EXPOSE_MMC */
+
+#endif /* !_SYS_UDFIO_H_ */
Index: sys/modules/Makefile
===================================================================
--- sys/modules/Makefile
+++ sys/modules/Makefile
@@ -375,6 +375,8 @@
ubsec \
udf \
udf_iconv \
+ udf2 \
+ udf2_iconv \
ufs \
uinput \
unionfs \
Index: sys/modules/udf2/Makefile
===================================================================
--- /dev/null
+++ sys/modules/udf2/Makefile
@@ -0,0 +1,13 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/../../fs/udf2
+
+KMOD= udf2
+
+SRCS= udf_readwrite.c udf_subr.c udf_allocation.c \
+ udf_osta.c udf_vfsops.c udf_vnops.c udf_filenames.c
+# udf_strat_bootstrap.c udf_strat_direct.c udf_strat_rmw.c udf_strat_sequential.c
+SRCS+= vnode_if.h
+EXPORT_SYMS= udf_iconv
+
+.include <bsd.kmod.mk>
Index: sys/modules/udf2_iconv/Makefile
===================================================================
--- /dev/null
+++ sys/modules/udf2_iconv/Makefile
@@ -0,0 +1,9 @@
+# $FreeBSD$
+
+.PATH: ${.CURDIR}/../../fs/udf2
+KMOD= udf2_iconv
+SRCS= udf_iconv.c
+
+CFLAGS+= -I${.CURDIR}/../../
+
+.include <bsd.kmod.mk>
