Page Menu · Home · FreeBSD

D19753.id55736.diff
Assigned To: No One · Visibility: Temporary

D19753.id55736.diff

Index: sys/x86/iommu/busdma_dmar.c
===================================================================
--- sys/x86/iommu/busdma_dmar.c
+++ sys/x86/iommu/busdma_dmar.c
@@ -665,8 +665,8 @@
{
struct bus_dma_tag_dmar *tag;
struct bus_dmamap_dmar *map;
- vm_page_t *ma;
- vm_paddr_t pstart, pend;
+ vm_page_t *ma, fma;
+ vm_paddr_t pstart, pend, paddr;
int error, i, ma_cnt, offset;
tag = (struct bus_dma_tag_dmar *)dmat;
@@ -679,10 +679,45 @@
M_WAITOK : M_NOWAIT);
if (ma == NULL)
return (ENOMEM);
- for (i = 0; i < ma_cnt; i++)
- ma[i] = PHYS_TO_VM_PAGE(pstart + i * PAGE_SIZE);
+ fma = NULL;
+ for (i = 0; i < ma_cnt; i++) {
+ paddr = pstart + i * PAGE_SIZE;
+ ma[i] = PHYS_TO_VM_PAGE(paddr);
+ if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) {
+ /*
+ * If PHYS_TO_VM_PAGE() returned NULL or the
+ * vm_page was not initialized we'll use fake
+ * pages to go back and fill in the holes.
+ *
+ * Allocate the fake page array now as an
+ * indicator.
+ */
+ fma = malloc(sizeof(struct vm_page) * ma_cnt, M_DEVBUF,
+ M_ZERO | (map->cansleep ? M_WAITOK : M_NOWAIT));
+ if (fma == NULL) {
+ free(ma, M_DEVBUF);
+ return (ENOMEM);
+ }
+ }
+ }
+ if (fma != NULL) {
+ /*
+ * For some addresses PHYS_TO_VM_PAGE() returned NULL
+ * or the vm_page was not initialized. Use fake pages
+ * to fill in the holes.
+ */
+ for (i = 0; i < ma_cnt; i++) {
+ paddr = pstart + i * PAGE_SIZE;
+ if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) {
+ vm_page_initfake(&fma[i], pstart + i * PAGE_SIZE,
+ VM_MEMATTR_DEFAULT);
+ ma[i] = &fma[i];
+ }
+ }
+ }
error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
flags, segs, segp);
+ free(fma, M_DEVBUF);
free(ma, M_DEVBUF);
return (error);
}
@@ -708,37 +743,43 @@
M_WAITOK : M_NOWAIT);
if (ma == NULL)
return (ENOMEM);
- if (dumping) {
+ fma = NULL;
+ for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) {
+ if (pmap == kernel_pmap)
+ paddr = pmap_kextract(pstart);
+ else
+ paddr = pmap_extract(pmap, pstart);
+ ma[i] = PHYS_TO_VM_PAGE(paddr);
+ if (ma[i] == NULL || VM_PAGE_TO_PHYS(ma[i]) != paddr) {
+ /*
+ * If PHYS_TO_VM_PAGE() returned NULL or the
+ * vm_page was not initialized we'll use fake
+ * pages to go back and fill in the holes.
+ *
+ * Allocate the fake page array now as an
+ * indicator.
+ */
+ fma = malloc(sizeof(struct vm_page) * ma_cnt, M_DEVBUF,
+ M_ZERO | (map->cansleep ? M_WAITOK : M_NOWAIT));
+ if (fma == NULL) {
+ free(ma, M_DEVBUF);
+ return (ENOMEM);
+ }
+ }
+ }
+ if (fma != NULL) {
/*
- * If dumping, do not attempt to call
- * PHYS_TO_VM_PAGE() at all. It may return non-NULL
- * but the vm_page returned might be not initialized,
- * e.g. for the kernel itself.
+ * For some addresses PHYS_TO_VM_PAGE() returned NULL
+ * or the vm_page was not initialized. Use fake pages
+ * to fill in the holes.
*/
- KASSERT(pmap == kernel_pmap, ("non-kernel address write"));
- fma = malloc(sizeof(struct vm_page) * ma_cnt, M_DEVBUF,
- M_ZERO | (map->cansleep ? M_WAITOK : M_NOWAIT));
- if (fma == NULL) {
- free(ma, M_DEVBUF);
- return (ENOMEM);
- }
for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) {
- paddr = pmap_kextract(pstart);
- vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT);
- ma[i] = &fma[i];
- }
- } else {
- fma = NULL;
- for (i = 0; i < ma_cnt; i++, pstart += PAGE_SIZE) {
if (pmap == kernel_pmap)
paddr = pmap_kextract(pstart);
else
paddr = pmap_extract(pmap, pstart);
- ma[i] = PHYS_TO_VM_PAGE(paddr);
- KASSERT(VM_PAGE_TO_PHYS(ma[i]) == paddr,
- ("PHYS_TO_VM_PAGE failed %jx %jx m %p",
- (uintmax_t)paddr, (uintmax_t)VM_PAGE_TO_PHYS(ma[i]),
- ma[i]));
+ vm_page_initfake(&fma[i], paddr, VM_MEMATTR_DEFAULT);
+ ma[i] = &fma[i];
}
}
error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,

File Metadata

Mime Type
text/plain
Expires
Sat, Apr 4, 12:28 AM (27 m, 54 s)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
30787707
Default Alt Text
D19753.id55736.diff (3 KB)

Event Timeline