Page MenuHomeFreeBSD

D27194.id84385.diff
No OneTemporary

D27194.id84385.diff

Index: share/man/man9/Makefile
===================================================================
--- share/man/man9/Makefile
+++ share/man/man9/Makefile
@@ -401,6 +401,7 @@
vn_isdisk.9 \
vnet.9 \
vnode.9 \
+ vnode_pager_purge_range.9 \
VOP_ACCESS.9 \
VOP_ACLCHECK.9 \
VOP_ADVISE.9 \
Index: share/man/man9/vnode_pager_purge_range.9
===================================================================
--- /dev/null
+++ share/man/man9/vnode_pager_purge_range.9
@@ -0,0 +1,73 @@
+.\"
+.\" SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+.\"
+.\" Copyright (c) 2021 The FreeBSD Foundation
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.Dd January 26, 2021
+.Dt VNODE_PAGER_PURGE_RANGE 9
+.Os
+.Sh NAME
+.Nm vnode_pager_purge_range
+.Nd "invalidate the cached contents of the given byte range of a vnode"
+.Sh SYNOPSIS
+.In vm/vm_extern.h
+.Ft void
+.Fn vnode_pager_purge_range "struct vnode *vp" "vm_ooffset_t startoff" "vm_ooffset_t endoff"
+.Sh DESCRIPTION
+.Nm
+invalidates the cached content covered by the given byte range of the
+specified vnode
+.Fa vp .
+The range to be purged is
+.Eo [
+.Fa startoff , endoff
+.Ec ) .
+Affected pages within the specified range will be tossed away.
+.Sh IMPLEMENTATION NOTES
+Within the specified range,
+in case
+.Fa startoff
+or
+.Fa endoff
+is not aligned to a page boundary,
+the partial-page area will be zeroed.
+Within such a partial page,
+for content occupying whole blocks aligned to block
+boundaries,
+the dirty bits for the corresponding blocks will be cleared.
+.Sh LOCKING
+Writer lock of the VM object of
+.Fa vp
+will be held within the function.
+.Sh SEE ALSO
+.Xr vnode 9
+.Sh HISTORY
+The
+.Nm
+function first appeared in
+.Fx 14 .
+.Sh AUTHORS
+This
+manual page was written by
+.An Ka Ho Ng Aq Mt khng@freebsdfoundation.org .
Index: sys/vm/vm_extern.h
===================================================================
--- sys/vm/vm_extern.h
+++ sys/vm/vm_extern.h
@@ -120,6 +120,7 @@
void vmspace_exitfree(struct proc *);
void vmspace_switch_aio(struct vmspace *);
void vnode_pager_setsize(struct vnode *, vm_ooffset_t);
+void vnode_pager_purge_range(struct vnode *, vm_ooffset_t, vm_ooffset_t);
int vslock(void *, size_t);
void vsunlock(void *, size_t);
struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
Index: sys/vm/vnode_pager.c
===================================================================
--- sys/vm/vnode_pager.c
+++ sys/vm/vnode_pager.c
@@ -421,6 +421,49 @@
return TRUE;
}
+/*
+ * Internal routine clearing partial-page content
+ */
+static void
+vnode_pager_subpage_purge(struct vm_page *m, int base, int end)
+{
+ int size = end - base;
+
+ /*
+ * Clear out partial-page garbage in case
+ * the page has been mapped.
+ */
+ pmap_zero_page_area(m, base, size);
+
+ /*
+ * Update the valid bits to reflect the blocks
+ * that have been zeroed. Some of these valid
+ * bits may have already been set.
+ */
+ vm_page_set_valid_range(m, base, size);
+
+ /*
+ * Round up "base" to the next block boundary so
+ * that the dirty bit for a partially zeroed
+ * block is not cleared.
+ */
+ base = roundup2(base, DEV_BSIZE);
+ end = rounddown2(end, DEV_BSIZE);
+
+ if (end > base) {
+ /*
+ * Clear out partial-page dirty bits.
+ *
+ * note that we do not clear out the
+ * valid bits. This would prevent
+ * bogus_page replacement from working
+ * properly.
+ */
+ vm_page_clear_dirty(m, base, end - base);
+ }
+
+}
+
/*
* Lets the VM system know about a change in size for a file.
* We adjust our own internal size and flush any cached pages in
@@ -483,39 +526,9 @@
m = vm_page_grab(object, OFF_TO_IDX(nsize), VM_ALLOC_NOCREAT);
if (m == NULL)
goto out;
- if (!vm_page_none_valid(m)) {
- int base = (int)nsize & PAGE_MASK;
- int size = PAGE_SIZE - base;
-
- /*
- * Clear out partial-page garbage in case
- * the page has been mapped.
- */
- pmap_zero_page_area(m, base, size);
-
- /*
- * Update the valid bits to reflect the blocks that
- * have been zeroed. Some of these valid bits may
- * have already been set.
- */
- vm_page_set_valid_range(m, base, size);
-
- /*
- * Round "base" to the next block boundary so that the
- * dirty bit for a partially zeroed block is not
- * cleared.
- */
- base = roundup2(base, DEV_BSIZE);
-
- /*
- * Clear out partial-page dirty bits.
- *
- * note that we do not clear out the valid
- * bits. This would prevent bogus_page
- * replacement from working properly.
- */
- vm_page_clear_dirty(m, base, PAGE_SIZE - base);
- }
+ if (!vm_page_none_valid(m))
+ vnode_pager_subpage_purge(m, (int)nsize & PAGE_MASK,
+ PAGE_SIZE);
vm_page_xunbusy(m);
}
out:
@@ -528,6 +541,66 @@
VM_OBJECT_WUNLOCK(object);
}
+/*
+ * Lets the VM system know about the purged range for a file. We toss away any
+ * cached pages in the associated object that are affected by the purge
+ * operation. A partial page at either end of the range will be zeroed, and
+ * the dirty bits of any DEV_BSIZE blocks zeroed in full will be cleared.
+ *
+ * Write lock of the VM object in vnode will be held.
+ */
+void
+vnode_pager_purge_range(
+ struct vnode *vp, vm_ooffset_t startoff, vm_ooffset_t endoff)
+{
+ struct vm_page *m;
+ struct vm_object *object;
+ vm_pindex_t pgrmidx;
+ bool same_page;
+
+ object = vp->v_object;
+ pgrmidx = OFF_TO_IDX(startoff + PAGE_MASK);
+ same_page = OFF_TO_IDX(startoff) == OFF_TO_IDX(endoff);
+ if (object == NULL || endoff <= startoff)
+ return;
+
+ VM_OBJECT_WLOCK(object);
+
+ if (pgrmidx < OFF_TO_IDX(endoff))
+ vm_object_page_remove(object, pgrmidx,
+ OFF_TO_IDX(endoff), 0);
+
+ if ((startoff & PAGE_MASK) != 0) {
+ int base = (int)startoff & PAGE_MASK;
+ int end = same_page ? (int)endoff & PAGE_MASK : PAGE_SIZE;
+
+ m = vm_page_grab(vp->v_object, OFF_TO_IDX(startoff),
+ VM_ALLOC_NOCREAT);
+ if (m != NULL) {
+ if (!vm_page_none_valid(m))
+ vnode_pager_subpage_purge(m, base, end);
+ vm_page_xunbusy(m);
+ }
+
+ if (same_page)
+ goto out;
+ }
+ if ((endoff & PAGE_MASK) != 0) {
+ int base = same_page ? (int)startoff & PAGE_MASK : 0;
+ int end = (int)endoff & PAGE_MASK;
+
+ m = vm_page_grab(vp->v_object, OFF_TO_IDX(endoff),
+ VM_ALLOC_NOCREAT);
+ if (m != NULL) {
+ if (!vm_page_none_valid(m))
+ vnode_pager_subpage_purge(m, base, end);
+ vm_page_xunbusy(m);
+ }
+ }
+out:
+ VM_OBJECT_WUNLOCK(object);
+}
+
/*
* calculate the linear (byte) disk address of specified virtual
* file address

File Metadata

Mime Type
text/plain
Expires
Sat, Jan 18, 11:26 PM (23 m, 4 s)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
15850395
Default Alt Text
D27194.id84385.diff (7 KB)

Event Timeline