Index: sys/cam/ata/ata_da.c =================================================================== --- sys/cam/ata/ata_da.c +++ sys/cam/ata/ata_da.c @@ -3447,8 +3447,8 @@ maxio = softc->cpi.maxio; /* Honor max I/O size of SIM */ if (maxio == 0) maxio = DFLTPHYS; /* traditional default */ - else if (maxio > MAXPHYS) - maxio = MAXPHYS; /* for safety */ + else if (maxio > maxphys) + maxio = maxphys; /* for safety */ if (softc->flags & ADA_FLAG_CAN_48BIT) maxio = min(maxio, 65536 * softc->params.secsize); else /* 28bit ATA command limit */ Index: sys/cam/cam_compat.c =================================================================== --- sys/cam/cam_compat.c +++ sys/cam/cam_compat.c @@ -368,7 +368,7 @@ /* Remap the CCB into kernel address space */ bzero(&mapinfo, sizeof(mapinfo)); - cam_periph_mapmem(ccb, &mapinfo, MAXPHYS); + cam_periph_mapmem(ccb, &mapinfo, maxphys); dm = ccb->cdm.matches; /* Translate in-place: old fields are smaller */ Index: sys/cam/cam_periph.c =================================================================== --- sys/cam/cam_periph.c +++ sys/cam/cam_periph.c @@ -772,7 +772,7 @@ * Map user virtual pointers into kernel virtual address space, so we can * access the memory. This is now a generic function that centralizes most * of the sanity checks on the data flags, if any. - * This also only works for up to MAXPHYS memory. Since we use + * This also only works for up to maxphys memory. Since we use * buffers to map stuff in and out, we're limited to the buffer size. */ int @@ -788,8 +788,8 @@ bzero(mapinfo, sizeof(*mapinfo)); if (maxmap == 0) maxmap = DFLTPHYS; /* traditional default */ - else if (maxmap > MAXPHYS) - maxmap = MAXPHYS; /* for safety */ + else if (maxmap > maxphys) + maxmap = maxphys; /* for safety */ switch(ccb->ccb_h.func_code) { case XPT_DEV_MATCH: if (ccb->cdm.match_buf_len == 0) { @@ -813,9 +813,9 @@ } /* * This request will not go to the hardware, no reason - * to be so strict. vmapbuf() is able to map up to MAXPHYS. 
+ * to be so strict. vmapbuf() is able to map up to maxphys. */ - maxmap = MAXPHYS; + maxmap = maxphys; break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: @@ -881,9 +881,9 @@ /* * This request will not go to the hardware, no reason - * to be so strict. vmapbuf() is able to map up to MAXPHYS. + * to be so strict. vmapbuf() is able to map up to maxphys. */ - maxmap = MAXPHYS; + maxmap = maxphys; break; default: return(EINVAL); @@ -911,7 +911,7 @@ * boundary. */ misaligned[i] = (lengths[i] + - (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK) > MAXPHYS); + (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK) > maxphys); } /* Index: sys/cam/cam_xpt.c =================================================================== --- sys/cam/cam_xpt.c +++ sys/cam/cam_xpt.c @@ -553,7 +553,7 @@ * Map the pattern and match buffers into kernel * virtual address space. */ - error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS); + error = cam_periph_mapmem(inccb, &mapinfo, maxphys); if (error) { inccb->ccb_h.path = old_path; Index: sys/cam/ctl/ctl_backend_block.c =================================================================== --- sys/cam/ctl/ctl_backend_block.c +++ sys/cam/ctl/ctl_backend_block.c @@ -102,9 +102,11 @@ */ #define CTLBLK_HALF_IO_SIZE (512 * 1024) #define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2) -#define CTLBLK_MAX_SEG MIN(CTLBLK_HALF_IO_SIZE, MAXPHYS) -#define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1) +#define CTLBLK_MIN_SEG (128 * 1024) +#define CTLBLK_MAX_SEG MIN(CTLBLK_HALF_IO_SIZE, maxphys) +#define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MIN_SEG, 1) #define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2) +#define CTLBLK_NUM_SEGS (CTLBLK_MAX_IO_SIZE / CTLBLK_MAX_SEG) #ifdef CTLBLK_DEBUG #define DPRINTF(fmt, args...) 
\ @@ -189,10 +191,8 @@ int num_luns; SLIST_HEAD(, ctl_be_block_lun) lun_list; uma_zone_t beio_zone; - uma_zone_t buf_zone; -#if (CTLBLK_MAX_SEG > 131072) - uma_zone_t buf128_zone; -#endif + uma_zone_t bufmin_zone; + uma_zone_t bufmax_zone; }; static struct ctl_be_block_softc backend_block_softc; @@ -307,12 +307,13 @@ size_t len) { -#if (CTLBLK_MAX_SEG > 131072) - if (len <= 131072) - sg->addr = uma_zalloc(softc->buf128_zone, M_WAITOK); - else -#endif - sg->addr = uma_zalloc(softc->buf_zone, M_WAITOK); + if (len <= CTLBLK_MIN_SEG) { + sg->addr = uma_zalloc(softc->bufmin_zone, M_WAITOK); + } else { + KASSERT(len <= CTLBLK_MAX_SEG, + ("Too large alloc %lu > %lu", len, CTLBLK_MAX_SEG)); + sg->addr = uma_zalloc(softc->bufmax_zone, M_WAITOK); + } sg->len = len; } @@ -320,12 +321,13 @@ ctl_free_seg(struct ctl_be_block_softc *softc, struct ctl_sg_entry *sg) { -#if (CTLBLK_MAX_SEG > 131072) - if (sg->len <= 131072) - uma_zfree(softc->buf128_zone, sg->addr); - else -#endif - uma_zfree(softc->buf_zone, sg->addr); + if (sg->len <= CTLBLK_MIN_SEG) { + uma_zfree(softc->bufmin_zone, sg->addr); + } else { + KASSERT(sg->len <= CTLBLK_MAX_SEG, + ("Too large free %lu > %lu", sg->len, CTLBLK_MAX_SEG)); + uma_zfree(softc->bufmax_zone, sg->addr); + } } static struct ctl_be_block_io * @@ -1344,7 +1346,7 @@ else pbo = 0; len_left = (uint64_t)lbalen->len * cbe_lun->blocksize; - for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { + for (i = 0, lba = 0; i < CTLBLK_NUM_SEGS && len_left > 0; i++) { /* * Setup the S/G entry for this chunk. */ @@ -1631,7 +1633,7 @@ * Setup the S/G entry for this chunk. 
*/ ctl_alloc_seg(softc, &beio->sg_segs[i], - min(CTLBLK_MAX_SEG, len_left)); + MIN(CTLBLK_MAX_SEG, len_left)); DPRINTF("segment %d addr %p len %zd\n", i, beio->sg_segs[i].addr, beio->sg_segs[i].len); @@ -2802,12 +2804,11 @@ mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); - softc->buf_zone = uma_zcreate("ctlblock", CTLBLK_MAX_SEG, + softc->bufmin_zone = uma_zcreate("ctlblockmin", CTLBLK_MIN_SEG, NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); -#if (CTLBLK_MAX_SEG > 131072) - softc->buf128_zone = uma_zcreate("ctlblock128", 131072, - NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); -#endif + if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG) + softc->bufmax_zone = uma_zcreate("ctlblockmax", CTLBLK_MAX_SEG, + NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); SLIST_INIT(&softc->lun_list); return (0); } @@ -2832,10 +2833,9 @@ mtx_lock(&softc->lock); } mtx_unlock(&softc->lock); - uma_zdestroy(softc->buf_zone); -#if (CTLBLK_MAX_SEG > 131072) - uma_zdestroy(softc->buf128_zone); -#endif + uma_zdestroy(softc->bufmin_zone); + if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG) + uma_zdestroy(softc->bufmax_zone); uma_zdestroy(softc->beio_zone); mtx_destroy(&softc->lock); sx_destroy(&softc->modify_lock); Index: sys/cam/mmc/mmc_da.c =================================================================== --- sys/cam/mmc/mmc_da.c +++ sys/cam/mmc/mmc_da.c @@ -1587,7 +1587,7 @@ part->disk->d_name = part->name; part->disk->d_drv1 = part; part->disk->d_maxsize = - MIN(MAXPHYS, sdda_get_max_data(periph, + MIN(maxphys, sdda_get_max_data(periph, (union ccb *)&cpi) * mmc_get_sector_size(periph)); part->disk->d_unit = cnt; part->disk->d_flags = 0; Index: sys/cam/nvme/nvme_da.c =================================================================== --- sys/cam/nvme/nvme_da.c +++ sys/cam/nvme/nvme_da.c @@ -906,8 +906,8 @@ maxio = cpi.maxio; /* Honor max I/O size of SIM */ if (maxio == 0) maxio = DFLTPHYS; /* 
traditional default */ - else if (maxio > MAXPHYS) - maxio = MAXPHYS; /* for safety */ + else if (maxio > maxphys) + maxio = maxphys; /* for safety */ disk->d_maxsize = maxio; flbas_fmt = (nsd->flbas >> NVME_NS_DATA_FLBAS_FORMAT_SHIFT) & NVME_NS_DATA_FLBAS_FORMAT_MASK; Index: sys/cam/scsi/scsi_cd.c =================================================================== --- sys/cam/scsi/scsi_cd.c +++ sys/cam/scsi/scsi_cd.c @@ -696,8 +696,8 @@ softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->disk->d_maxsize = DFLTPHYS; /* traditional default */ - else if (cpi.maxio > MAXPHYS) - softc->disk->d_maxsize = MAXPHYS; /* for safety */ + else if (cpi.maxio > maxphys) + softc->disk->d_maxsize = maxphys; /* for safety */ else softc->disk->d_maxsize = cpi.maxio; softc->disk->d_flags = 0; Index: sys/cam/scsi/scsi_da.c =================================================================== --- sys/cam/scsi/scsi_da.c +++ sys/cam/scsi/scsi_da.c @@ -2921,8 +2921,8 @@ softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ - else if (cpi.maxio > MAXPHYS) - softc->maxio = MAXPHYS; /* for safety */ + else if (cpi.maxio > maxphys) + softc->maxio = maxphys; /* for safety */ else softc->maxio = cpi.maxio; if (softc->quirks & DA_Q_128KB) @@ -4819,7 +4819,7 @@ if (maxsector == 0) maxsector = -1; } - if (block_size >= MAXPHYS) { + if (block_size >= maxphys) { xpt_print(periph->path, "unsupportable block size %ju\n", (uintmax_t) block_size); Index: sys/cam/scsi/scsi_pass.c =================================================================== --- sys/cam/scsi/scsi_pass.c +++ sys/cam/scsi/scsi_pass.c @@ -583,15 +583,15 @@ periph->periph_name, periph->unit_number); snprintf(softc->io_zone_name, sizeof(softc->io_zone_name), "%s%dIO", periph->periph_name, periph->unit_number); - softc->io_zone_size = MAXPHYS; + softc->io_zone_size = maxphys; knlist_init_mtx(&softc->read_select.si_note, cam_periph_mtx(periph)); xpt_path_inq(&cpi, periph->path); if 
(cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ - else if (cpi.maxio > MAXPHYS) - softc->maxio = MAXPHYS; /* for safety */ + else if (cpi.maxio > maxphys) + softc->maxio = maxphys; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ @@ -1507,7 +1507,7 @@ /* * We allocate buffers in io_zone_size increments for an - * S/G list. This will generally be MAXPHYS. + * S/G list. This will generally be maxphys. */ if (lengths[0] <= softc->io_zone_size) num_segs_needed = 1; Index: sys/cam/scsi/scsi_sa.c =================================================================== --- sys/cam/scsi/scsi_sa.c +++ sys/cam/scsi/scsi_sa.c @@ -2447,12 +2447,12 @@ /* * If maxio isn't set, we fall back to DFLTPHYS. Otherwise we take - * the smaller of cpi.maxio or MAXPHYS. + * the smaller of cpi.maxio or maxphys. */ if (cpi.maxio == 0) softc->maxio = DFLTPHYS; - else if (cpi.maxio > MAXPHYS) - softc->maxio = MAXPHYS; + else if (cpi.maxio > maxphys) + softc->maxio = maxphys; else softc->maxio = cpi.maxio; Index: sys/cam/scsi/scsi_sg.c =================================================================== --- sys/cam/scsi/scsi_sg.c +++ sys/cam/scsi/scsi_sg.c @@ -327,8 +327,8 @@ if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ - else if (cpi.maxio > MAXPHYS) - softc->maxio = MAXPHYS; /* for safety */ + else if (cpi.maxio > maxphys) + softc->maxio = maxphys; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ Index: sys/cam/scsi/scsi_target.c =================================================================== --- sys/cam/scsi/scsi_target.c +++ sys/cam/scsi/scsi_target.c @@ -404,8 +404,8 @@ } if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ - else if (cpi.maxio > MAXPHYS) - softc->maxio = MAXPHYS; /* for safety */ + else if (cpi.maxio > maxphys) + softc->maxio = maxphys; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ Index: sys/compat/linprocfs/linprocfs.c 
=================================================================== --- sys/compat/linprocfs/linprocfs.c +++ sys/compat/linprocfs/linprocfs.c @@ -1810,8 +1810,8 @@ buflen = resid; if (buflen > IOSIZE_MAX) return (EINVAL); - if (buflen > MAXPHYS) - buflen = MAXPHYS; + if (buflen > maxphys) + buflen = maxphys; if (resid <= 0) return (0); Index: sys/compat/linux/linux_ioctl.c =================================================================== --- sys/compat/linux/linux_ioctl.c +++ sys/compat/linux/linux_ioctl.c @@ -2152,7 +2152,7 @@ if (error != 0) return (error); - max_len = MAXPHYS - 1; + max_len = maxphys - 1; CURVNET_SET(TD_TO_VNET(td)); /* handle the 'request buffer size' case */ Index: sys/conf/options =================================================================== --- sys/conf/options +++ sys/conf/options @@ -603,7 +603,7 @@ KASSERT_PANIC_OPTIONAL opt_global.h MAXCPU opt_global.h MAXMEMDOM opt_global.h -MAXPHYS opt_global.h +MAXPHYS opt_maxphys.h MCLSHIFT opt_global.h MUTEX_NOINLINE opt_global.h LOCK_PROFILING opt_global.h Index: sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c =================================================================== --- sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c +++ sys/contrib/openzfs/module/os/freebsd/zfs/vdev_geom.c @@ -379,7 +379,7 @@ int i, n_bios, j; size_t bios_size; - maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize); + maxio = maxphys - (maxphys % cp->provider->sectorsize); n_bios = 0; /* How many bios are required for all commands ? 
*/ Index: sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c =================================================================== --- sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c +++ sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c @@ -1191,7 +1191,7 @@ args.mda_si_drv2 = zv; if (make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, newname) == 0) { - dev->si_iosize_max = MAXPHYS; + dev->si_iosize_max = maxphys; zsd->zsd_cdev = dev; } } @@ -1327,7 +1327,7 @@ dmu_objset_disown(os, B_TRUE, FTAG); goto out_giant; } - dev->si_iosize_max = MAXPHYS; + dev->si_iosize_max = maxphys; zsd->zsd_cdev = dev; } (void) strlcpy(zv->zv_name, name, MAXPATHLEN); Index: sys/dev/ahci/ahci.h =================================================================== --- sys/dev/ahci/ahci.h +++ sys/dev/ahci/ahci.h @@ -310,13 +310,8 @@ #define AHCI_P_DEVSLP_DM 0x0e000000 #define AHCI_P_DEVSLP_DM_SHIFT 25 -/* Just to be sure, if building as module. */ -#if MAXPHYS < 512 * 1024 -#undef MAXPHYS -#define MAXPHYS 512 * 1024 -#endif /* Pessimistic prognosis on number of required S/G entries */ -#define AHCI_SG_ENTRIES (roundup(btoc(MAXPHYS) + 1, 8)) +#define AHCI_SG_ENTRIES MIN(roundup(btoc(maxphys) + 1, 8), 65528) /* Command list. 32 commands. First, 1Kbyte aligned. 
*/ #define AHCI_CL_OFFSET 0 #define AHCI_CL_SIZE 32 @@ -344,7 +339,7 @@ u_int8_t cfis[64]; u_int8_t acmd[32]; u_int8_t reserved[32]; - struct ahci_dma_prd prd_tab[AHCI_SG_ENTRIES]; + struct ahci_dma_prd prd_tab[]; } __packed; struct ahci_cmd_list { @@ -394,6 +389,7 @@ struct ahci_channel *ch; /* Channel */ u_int8_t slot; /* Number of this slot */ enum ahci_slot_states state; /* Slot state */ + u_int ct_offset; /* cmd_tab offset */ union ccb *ccb; /* CCB occupying slot */ struct ata_dmaslot dma; /* DMA data of this slot */ struct callout timeout; /* Execution timeout */ Index: sys/dev/ahci/ahci.c =================================================================== --- sys/dev/ahci/ahci.c +++ sys/dev/ahci/ahci.c @@ -1124,8 +1124,7 @@ error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, - AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots, - AHCI_SG_ENTRIES, AHCI_PRD_MAX, + AHCI_SG_ENTRIES * PAGE_SIZE, AHCI_SG_ENTRIES, AHCI_PRD_MAX, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag); if (error != 0) goto error; @@ -1187,6 +1186,7 @@ slot->ch = ch; slot->slot = i; slot->state = AHCI_SLOT_EMPTY; + slot->ct_offset = AHCI_CT_OFFSET + AHCI_CT_SIZE * i; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); @@ -1642,8 +1642,7 @@ } KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n")); /* Get a piece of the workspace for this request */ - ctp = (struct ahci_cmd_tab *) - (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); + ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset); /* Fill S/G table */ prd = &ctp->prd_tab[0]; for (i = 0; i < nsegs; i++) { @@ -1672,8 +1671,7 @@ uint16_t cmd_flags; /* Get a piece of the workspace for this request */ - ctp = (struct ahci_cmd_tab *) - (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); + ctp = (struct ahci_cmd_tab *)(ch->dma.work + slot->ct_offset); /* Setup the FIS for this request */ if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, 
slot->slot))) { device_printf(ch->dev, "Setting up SATA FIS failed\n"); @@ -1710,8 +1708,7 @@ softreset = 0; clp->bytecount = 0; clp->cmd_flags = htole16(cmd_flags); - clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET + - (AHCI_CT_SIZE * slot->slot)); + clp->cmd_table_phys = htole64(ch->dma.work_bus + slot->ct_offset); bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, @@ -2868,7 +2865,7 @@ cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; - cpi->maxio = MAXPHYS; + cpi->maxio = ctob(AHCI_SG_ENTRIES - 1); /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */ if (ch->quirks & AHCI_Q_MAXIO_64K) cpi->maxio = min(cpi->maxio, 128 * 512); Index: sys/dev/ahci/ahciem.c =================================================================== --- sys/dev/ahci/ahciem.c +++ sys/dev/ahci/ahciem.c @@ -641,7 +641,7 @@ cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; - cpi->maxio = MAXPHYS; + cpi->maxio = maxphys; cpi->hba_vendor = pci_get_vendor(parent); cpi->hba_device = pci_get_device(parent); cpi->hba_subvendor = pci_get_subvendor(parent); Index: sys/dev/ata/ata-all.h =================================================================== --- sys/dev/ata/ata-all.h +++ sys/dev/ata/ata-all.h @@ -152,7 +152,7 @@ #define ATA_SACTIVE 16 /* DMA register defines */ -#define ATA_DMA_ENTRIES 256 +#define ATA_DMA_ENTRIES MAX(17, btoc(maxphys) + 1) #define ATA_DMA_EOT 0x80000000 #define ATA_BMCMD_PORT 17 Index: sys/dev/ata/ata-all.c =================================================================== --- sys/dev/ata/ata-all.c +++ sys/dev/ata/ata-all.c @@ -139,7 +139,7 @@ if (ch->flags & ATA_SATA) ch->user[i].bytecount = 8192; else - ch->user[i].bytecount = MAXPHYS; + ch->user[i].bytecount = 65536; ch->user[i].caps = 0; 
ch->curr[i] = ch->user[i]; if (ch->flags & ATA_SATA) { Index: sys/dev/ata/ata-dma.c =================================================================== --- sys/dev/ata/ata-dma.c +++ sys/dev/ata/ata-dma.c @@ -87,7 +87,7 @@ if (ch->dma.segsize == 0) ch->dma.segsize = 65536; if (ch->dma.max_iosize == 0) - ch->dma.max_iosize = MIN((ATA_DMA_ENTRIES - 1) * PAGE_SIZE, MAXPHYS); + ch->dma.max_iosize = (ATA_DMA_ENTRIES - 1) * PAGE_SIZE; if (ch->dma.max_address == 0) ch->dma.max_address = BUS_SPACE_MAXADDR_32BIT; if (ch->dma.dma_slots == 0) Index: sys/dev/flash/cqspi.c =================================================================== --- sys/dev/flash/cqspi.c +++ sys/dev/flash/cqspi.c @@ -721,9 +721,9 @@ return (ENXIO); } - xdma_prep_sg(sc->xchan_tx, TX_QUEUE_SIZE, MAXPHYS, 8, 16, 0, + xdma_prep_sg(sc->xchan_tx, TX_QUEUE_SIZE, maxphys, 8, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR); - xdma_prep_sg(sc->xchan_rx, TX_QUEUE_SIZE, MAXPHYS, 8, 16, 0, + xdma_prep_sg(sc->xchan_rx, TX_QUEUE_SIZE, maxphys, 8, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR); cqspi_init(sc); Index: sys/dev/iscsi/iscsi.c =================================================================== --- sys/dev/iscsi/iscsi.c +++ sys/dev/iscsi/iscsi.c @@ -2407,7 +2407,7 @@ cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC3; - cpi->maxio = MAXPHYS; + cpi->maxio = maxphys; cpi->ccb_h.status = CAM_REQ_CMP; break; } Index: sys/dev/md/md.c =================================================================== --- sys/dev/md/md.c +++ sys/dev/md/md.c @@ -960,9 +960,10 @@ piov = auio.uio_iov; } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) { pb = uma_zalloc(md_pbuf_zone, M_WAITOK); + MPASS((pb->b_flags & B_MAXPHYS) != 0); bp->bio_resid = len; unmapped_step: - npages = atop(min(MAXPHYS, round_page(len + (ma_offs & + npages = atop(min(maxphys, round_page(len + (ma_offs & PAGE_MASK)))); iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len); KASSERT(iolen > 0, ("zero iolen")); 
@@ -1684,7 +1685,7 @@ sectsize = DEV_BSIZE; else sectsize = mdr->md_sectorsize; - if (sectsize > MAXPHYS || mdr->md_mediasize < sectsize) + if (sectsize > maxphys || mdr->md_mediasize < sectsize) return (EINVAL); if (mdr->md_options & MD_AUTOUNIT) sc = mdnew(-1, &error, mdr->md_type); Index: sys/dev/mfi/mfi.c =================================================================== --- sys/dev/mfi/mfi.c +++ sys/dev/mfi/mfi.c @@ -457,7 +457,7 @@ /* * Get information needed for sizing the contiguous memory for the * frame pool. Size down the sgl parameter since we know that - * we will never need more than what's required for MAXPHYS. + * we will never need more than what's required for MFI_MAXPHYS. * It would be nice if these constants were available at runtime * instead of compile time. */ Index: sys/dev/mpr/mpr.c =================================================================== --- sys/dev/mpr/mpr.c +++ sys/dev/mpr/mpr.c @@ -436,14 +436,14 @@ /* * If I/O size limitation requested then use it and pass up to CAM. - * If not, use MAXPHYS as an optimization hint, but report HW limit. + * If not, use maxphys as an optimization hint, but report HW limit. */ if (sc->max_io_pages > 0) { maxio = min(maxio, sc->max_io_pages * PAGE_SIZE); sc->maxio = maxio; } else { sc->maxio = maxio; - maxio = min(maxio, MAXPHYS); + maxio = min(maxio, maxphys); } sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) / Index: sys/dev/mps/mps.c =================================================================== --- sys/dev/mps/mps.c +++ sys/dev/mps/mps.c @@ -418,14 +418,14 @@ /* * If I/O size limitation requested, then use it and pass up to CAM. - * If not, use MAXPHYS as an optimization hint, but report HW limit. + * If not, use maxphys as an optimization hint, but report HW limit. 
*/ if (sc->max_io_pages > 0) { maxio = min(maxio, sc->max_io_pages * PAGE_SIZE); sc->maxio = maxio; } else { sc->maxio = maxio; - maxio = min(maxio, MAXPHYS); + maxio = min(maxio, maxphys); } sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) / Index: sys/dev/mpt/mpt.h =================================================================== --- sys/dev/mpt/mpt.h +++ sys/dev/mpt/mpt.h @@ -668,7 +668,7 @@ bus_addr_t request_phys; /* BusAddr of request memory */ uint32_t max_seg_cnt; /* calculated after IOC facts */ - uint32_t max_cam_seg_cnt;/* calculated from MAXPHYS*/ + uint32_t max_cam_seg_cnt;/* calculated from maxphys */ /* * Hardware management Index: sys/dev/mpt/mpt.c =================================================================== --- sys/dev/mpt/mpt.c +++ sys/dev/mpt/mpt.c @@ -2691,7 +2691,7 @@ /* * Use this as the basis for reporting the maximum I/O size to CAM. */ - mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1); + mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, btoc(maxphys) + 1); /* XXX Lame Locking! 
*/ MPT_UNLOCK(mpt); Index: sys/dev/mrsas/mrsas.c =================================================================== --- sys/dev/mrsas/mrsas.c +++ sys/dev/mrsas/mrsas.c @@ -1922,9 +1922,9 @@ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ - MAXPHYS, /* maxsize */ + maxphys, /* maxsize */ sc->max_num_sge, /* nsegments */ - MAXPHYS, /* maxsegsize */ + maxphys, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mrsas_parent_tag /* tag */ @@ -2154,9 +2154,9 @@ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, - MAXPHYS, + maxphys, sc->max_num_sge, /* nsegments */ - MAXPHYS, + maxphys, BUS_DMA_ALLOCNOW, busdma_lock_mutex, &sc->io_lock, Index: sys/dev/mvs/mvs.h =================================================================== --- sys/dev/mvs/mvs.h +++ sys/dev/mvs/mvs.h @@ -392,7 +392,7 @@ #define MVS_MAX_SLOTS 32 /* Pessimistic prognosis on number of required S/G entries */ -#define MVS_SG_ENTRIES (btoc(MAXPHYS) + 1) +#define MVS_SG_ENTRIES (btoc(maxphys) + 1) /* EDMA Command Request Block (CRQB) Data */ struct mvs_crqb { @@ -505,6 +505,7 @@ int slot; /* Number of this slot */ int tag; /* Used command tag */ enum mvs_slot_states state; /* Slot state */ + u_int eprd_offset; /* EPRD offset */ union ccb *ccb; /* CCB occupying slot */ struct ata_dmaslot dma; /* DMA data of this slot */ struct callout timeout; /* Execution timeout */ Index: sys/dev/mvs/mvs.c =================================================================== --- sys/dev/mvs/mvs.c +++ sys/dev/mvs/mvs.c @@ -370,8 +370,7 @@ if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, MVS_EPRD_MAX, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, - MVS_SG_ENTRIES * PAGE_SIZE * MVS_MAX_SLOTS, - MVS_SG_ENTRIES, MVS_EPRD_MAX, + MVS_SG_ENTRIES * PAGE_SIZE, MVS_SG_ENTRIES, MVS_EPRD_MAX, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) { goto error; } @@ -438,6 +437,7 @@ slot->dev = dev; slot->slot = i; slot->state = MVS_SLOT_EMPTY; + 
slot->eprd_offset = MVS_EPRD_OFFSET + MVS_EPRD_SIZE * i; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); @@ -1286,8 +1286,7 @@ } else { slot->dma.addr = 0; /* Get a piece of the workspace for this EPRD */ - eprd = (struct mvs_eprd *) - (ch->dma.workrq + MVS_EPRD_OFFSET + (MVS_EPRD_SIZE * slot->slot)); + eprd = (struct mvs_eprd *)(ch->dma.workrq + slot->eprd_offset); /* Fill S/G table */ for (i = 0; i < nsegs; i++) { eprd[i].prdbal = htole32(segs[i].ds_addr); @@ -1405,8 +1404,7 @@ DELAY(10); if (ch->basic_dma) { /* Start basic DMA. */ - eprd = ch->dma.workrq_bus + MVS_EPRD_OFFSET + - (MVS_EPRD_SIZE * slot->slot); + eprd = ch->dma.workrq_bus + slot->eprd_offset; ATA_OUTL(ch->r_mem, DMA_DTLBA, eprd); ATA_OUTL(ch->r_mem, DMA_DTHBA, (eprd >> 16) >> 16); ATA_OUTL(ch->r_mem, DMA_C, DMA_C_START | @@ -1433,7 +1431,7 @@ int i; /* Get address of the prepared EPRD */ - eprd = ch->dma.workrq_bus + MVS_EPRD_OFFSET + (MVS_EPRD_SIZE * slot->slot); + eprd = ch->dma.workrq_bus + slot->eprd_offset; /* Prepare CRQB. Gen IIe uses different CRQB format. */ if (ch->quirks & MVS_Q_GENIIE) { crqb2e = (struct mvs_crqb_gen2e *) @@ -2423,7 +2421,7 @@ cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; - cpi->maxio = MAXPHYS; + cpi->maxio = maxphys; if ((ch->quirks & MVS_Q_SOC) == 0) { cpi->hba_vendor = pci_get_vendor(parent); cpi->hba_device = pci_get_device(parent); Index: sys/dev/nvme/nvme.h =================================================================== --- sys/dev/nvme/nvme.h +++ sys/dev/nvme/nvme.h @@ -60,7 +60,7 @@ #define NVME_GLOBAL_NAMESPACE_TAG ((uint32_t)0xFFFFFFFF) /* Cap nvme to 1MB transfers driver explodes with larger sizes */ -#define NVME_MAX_XFER_SIZE (MAXPHYS < (1<<20) ? MAXPHYS : (1<<20)) +#define NVME_MAX_XFER_SIZE (maxphys < (1<<20) ? 
maxphys : (1<<20)) /* Register field definitions */ #define NVME_CAP_LO_REG_MQES_SHIFT (0) Index: sys/dev/nvme/nvme_ctrlr.c =================================================================== --- sys/dev/nvme/nvme_ctrlr.c +++ sys/dev/nvme/nvme_ctrlr.c @@ -1248,13 +1248,13 @@ if (pt->len > 0) { /* * vmapbuf calls vm_fault_quick_hold_pages which only maps full - * pages. Ensure this request has fewer than MAXPHYS bytes when + * pages. Ensure this request has fewer than maxphys bytes when * extended to full pages. */ addr = (vm_offset_t)pt->buf; end = round_page(addr + pt->len); addr = trunc_page(addr); - if (end - addr > MAXPHYS) + if (end - addr > maxphys) return EIO; if (pt->len > ctrlr->max_xfer_size) { Index: sys/dev/pms/freebsd/driver/ini/src/agdef.h =================================================================== --- sys/dev/pms/freebsd/driver/ini/src/agdef.h +++ sys/dev/pms/freebsd/driver/ini/src/agdef.h @@ -62,7 +62,7 @@ #define AGTIAPI_MAX_DEVICE_7H 256 /*Max devices per channel in 7H */ #define AGTIAPI_MAX_DEVICE_8H 512 /*Max devices per channel in 8H*/ #define AGTIAPI_MAX_CAM_Q_DEPTH 1024 -#define AGTIAPI_NSEGS (MAXPHYS / PAGE_SIZE) +#define AGTIAPI_NSEGS (maxphys / PAGE_SIZE) /* ** Adapter specific defines */ Index: sys/dev/pms/freebsd/driver/ini/src/agtiapi.c =================================================================== --- sys/dev/pms/freebsd/driver/ini/src/agtiapi.c +++ sys/dev/pms/freebsd/driver/ini/src/agtiapi.c @@ -1623,8 +1623,8 @@ nsegs = AGTIAPI_NSEGS; rsize = AGTIAPI_MAX_DMA_SEGS; // 128 - AGTIAPI_PRINTK( "agtiapi_alloc_requests: MAXPHYS 0x%x PAGE_SIZE 0x%x \n", - MAXPHYS, PAGE_SIZE ); + AGTIAPI_PRINTK( "agtiapi_alloc_requests: maxphys 0x%lx PAGE_SIZE 0x%x \n", + maxphys, PAGE_SIZE ); AGTIAPI_PRINTK( "agtiapi_alloc_requests: nsegs %d rsize %d \n", nsegs, rsize ); // 32, 128 // This is for csio->data_ptr Index: sys/dev/sdhci/sdhci.c =================================================================== --- sys/dev/sdhci/sdhci.c +++ 
sys/dev/sdhci/sdhci.c @@ -722,19 +722,19 @@ int err; if (!(slot->quirks & SDHCI_QUIRK_BROKEN_SDMA_BOUNDARY)) { - if (MAXPHYS <= 1024 * 4) + if (maxphys <= 1024 * 4) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_4K; - else if (MAXPHYS <= 1024 * 8) + else if (maxphys <= 1024 * 8) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_8K; - else if (MAXPHYS <= 1024 * 16) + else if (maxphys <= 1024 * 16) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_16K; - else if (MAXPHYS <= 1024 * 32) + else if (maxphys <= 1024 * 32) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_32K; - else if (MAXPHYS <= 1024 * 64) + else if (maxphys <= 1024 * 64) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_64K; - else if (MAXPHYS <= 1024 * 128) + else if (maxphys <= 1024 * 128) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_128K; - else if (MAXPHYS <= 1024 * 256) + else if (maxphys <= 1024 * 256) slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_256K; else slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_512K; @@ -2534,7 +2534,7 @@ switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: - mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim, MAXPHYS); + mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim, maxphys); break; case XPT_GET_TRAN_SETTINGS: Index: sys/dev/siis/siis.h =================================================================== --- sys/dev/siis/siis.h +++ sys/dev/siis/siis.h @@ -263,18 +263,12 @@ #define SIIS_OFFSET 0x100 #define SIIS_STEP 0x80 -/* Just to be sure, if building as module. */ -#if MAXPHYS < 512 * 1024 -#undef MAXPHYS -#define MAXPHYS 512 * 1024 -#endif /* Pessimistic prognosis on number of required S/G entries */ -#define SIIS_SG_ENTRIES (roundup(btoc(MAXPHYS), 4) + 1) -/* Command tables. Up to 32 commands, Each, 128byte aligned. */ -#define SIIS_CT_OFFSET 0 -#define SIIS_CT_SIZE (32 + 16 + SIIS_SG_ENTRIES * 16) +#define SIIS_SG_ENTRIES (roundup(btoc(maxphys), 4) + 1) +/* Port Request Block + S/G entries. 128byte aligned. 
*/ +#define SIIS_PRB_SIZE (32 + 16 + SIIS_SG_ENTRIES * 16) /* Total main work area. */ -#define SIIS_WORK_SIZE (SIIS_CT_OFFSET + SIIS_CT_SIZE * SIIS_MAX_SLOTS) +#define SIIS_WORK_SIZE (SIIS_PRB_SIZE * SIIS_MAX_SLOTS) struct siis_dma_prd { u_int64_t dba; @@ -287,12 +281,12 @@ } __packed; struct siis_cmd_ata { - struct siis_dma_prd prd[1 + SIIS_SG_ENTRIES]; + struct siis_dma_prd prd[2]; } __packed; struct siis_cmd_atapi { u_int8_t ccb[16]; - struct siis_dma_prd prd[SIIS_SG_ENTRIES]; + struct siis_dma_prd prd[1]; } __packed; struct siis_cmd { @@ -349,6 +343,7 @@ device_t dev; /* Device handle */ u_int8_t slot; /* Number of this slot */ enum siis_slot_states state; /* Slot state */ + u_int prb_offset; /* PRB offset */ union ccb *ccb; /* CCB occupying slot */ struct ata_dmaslot dma; /* DMA data of this slot */ struct callout timeout; /* Execution timeout */ Index: sys/dev/siis/siis.c =================================================================== --- sys/dev/siis/siis.c +++ sys/dev/siis/siis.c @@ -688,8 +688,7 @@ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, - SIIS_SG_ENTRIES * PAGE_SIZE * SIIS_MAX_SLOTS, - SIIS_SG_ENTRIES, 0xFFFFFFFF, + SIIS_SG_ENTRIES * PAGE_SIZE, SIIS_SG_ENTRIES, 0xFFFFFFFF, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) { goto error; } @@ -745,6 +744,7 @@ slot->dev = dev; slot->slot = i; slot->state = SIIS_SLOT_EMPTY; + slot->prb_offset = SIIS_PRB_SIZE * i; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); @@ -1034,8 +1034,7 @@ slot->dma.nsegs = nsegs; if (nsegs != 0) { /* Get a piece of the workspace for this request */ - ctp = (struct siis_cmd *)(ch->dma.work + SIIS_CT_OFFSET + - (SIIS_CT_SIZE * slot->slot)); + ctp = (struct siis_cmd *)(ch->dma.work + slot->prb_offset); /* Fill S/G table */ if (slot->ccb->ccb_h.func_code == XPT_ATA_IO) prd = &ctp->u.ata.prd[0]; @@ -1066,8 +1065,7 @@ mtx_assert(&ch->mtx, MA_OWNED); /* Get a piece of the workspace for this request */ - 
ctp = (struct siis_cmd *) - (ch->dma.work + SIIS_CT_OFFSET + (SIIS_CT_SIZE * slot->slot)); + ctp = (struct siis_cmd *)(ch->dma.work + slot->prb_offset); ctp->control = 0; ctp->protocol_override = 0; ctp->transfer_count = 0; @@ -1117,8 +1115,7 @@ /* Issue command to the controller. */ slot->state = SIIS_SLOT_RUNNING; ch->rslots |= (1 << slot->slot); - prb_bus = ch->dma.work_bus + - SIIS_CT_OFFSET + (SIIS_CT_SIZE * slot->slot); + prb_bus = ch->dma.work_bus + slot->prb_offset; ATA_OUTL(ch->r_mem, SIIS_P_CACTL(slot->slot), prb_bus); ATA_OUTL(ch->r_mem, SIIS_P_CACTH(slot->slot), prb_bus >> 32); /* Start command execution timeout */ @@ -1967,7 +1964,7 @@ cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; - cpi->maxio = MAXPHYS; + cpi->maxio = maxphys; cpi->hba_vendor = pci_get_vendor(parent); cpi->hba_device = pci_get_device(parent); cpi->hba_subvendor = pci_get_subvendor(parent); Index: sys/dev/sym/sym_conf.h =================================================================== --- sys/dev/sym/sym_conf.h +++ sys/dev/sym/sym_conf.h @@ -95,9 +95,9 @@ * Max number of scatter/gather entries for an I/O. * Each entry costs 8 bytes in the internal CCB data structure. * We use at most 33 segments but also no more than required for handling - * MAXPHYS. + * legacy MAXPHYS == 128 * 1024. */ -#define SYM_CONF_MAX_SG (MIN(33, (MAXPHYS / PAGE_SIZE) + 1)) +#define SYM_CONF_MAX_SG (MIN(33, (128 * 1024 / PAGE_SIZE) + 1)) /* * Max number of targets. 
Index: sys/dev/usb/storage/umass.c =================================================================== --- sys/dev/usb/storage/umass.c +++ sys/dev/usb/storage/umass.c @@ -2334,7 +2334,7 @@ case USB_SPEED_SUPER: cpi->base_transfer_speed = UMASS_SUPER_TRANSFER_SPEED; - cpi->maxio = MAXPHYS; + cpi->maxio = maxphys; break; case USB_SPEED_HIGH: cpi->base_transfer_speed = Index: sys/dev/virtio/block/virtio_blk.c =================================================================== --- sys/dev/virtio/block/virtio_blk.c +++ sys/dev/virtio/block/virtio_blk.c @@ -323,7 +323,7 @@ * than the maximum supported transfer size. */ if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) { - if (blkcfg.size_max < MAXPHYS) { + if (blkcfg.size_max < maxphys) { error = ENOTSUP; device_printf(dev, "host requires unsupported " "maximum segment size feature\n"); @@ -623,7 +623,7 @@ nsegs = VTBLK_MIN_SEGMENTS; if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) { - nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1); + nsegs += MIN(blkcfg->seg_max, maxphys / PAGE_SIZE + 1); if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT) nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT); } else @@ -713,8 +713,8 @@ * no pages are contiguous. This may impose an artificially low * maximum I/O size. But in practice, since QEMU advertises 128 * segments, this gives us a maximum IO size of 125 * PAGE_SIZE, - * which is typically greater than MAXPHYS. Eventually we should - * just advertise MAXPHYS and split buffers that are too big. + * which is typically greater than maxphys. Eventually we should + * just advertise maxphys and split buffers that are too big. * * Note we must subtract one additional segment in case of non * page aligned buffers. 
Index: sys/dev/virtio/scsi/virtio_scsi.c =================================================================== --- sys/dev/virtio/scsi/virtio_scsi.c +++ sys/dev/virtio/scsi/virtio_scsi.c @@ -450,7 +450,7 @@ nsegs = VTSCSI_MIN_SEGMENTS; if (seg_max > 0) { - nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1); + nsegs += MIN(seg_max, maxphys / PAGE_SIZE + 1); if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT); } else Index: sys/dev/xen/blkback/blkback.c =================================================================== --- sys/dev/xen/blkback/blkback.c +++ sys/dev/xen/blkback/blkback.c @@ -143,9 +143,10 @@ /** * The maximum mapped region size per request we will allow in a negotiated * block-front/back communication channel. + * Use old default of MAXPHYS == 128K. */ #define XBB_MAX_REQUEST_SIZE \ - MIN(MAXPHYS, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + MIN(128 * 1024, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) /** * The maximum number of segments (within a request header and accompanying Index: sys/dev/xen/blkfront/blkfront.c =================================================================== --- sys/dev/xen/blkfront/blkfront.c +++ sys/dev/xen/blkfront/blkfront.c @@ -1306,8 +1306,8 @@ sc->xbd_max_request_segments = 0; if (sc->xbd_max_request_segments > XBD_MAX_INDIRECT_SEGMENTS) sc->xbd_max_request_segments = XBD_MAX_INDIRECT_SEGMENTS; - if (sc->xbd_max_request_segments > XBD_SIZE_TO_SEGS(MAXPHYS)) - sc->xbd_max_request_segments = XBD_SIZE_TO_SEGS(MAXPHYS); + if (sc->xbd_max_request_segments > XBD_SIZE_TO_SEGS(maxphys)) + sc->xbd_max_request_segments = XBD_SIZE_TO_SEGS(maxphys); sc->xbd_max_request_indirectpages = XBD_INDIRECT_SEGS_TO_PAGES(sc->xbd_max_request_segments); if (sc->xbd_max_request_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) Index: sys/fs/cd9660/cd9660_vfsops.c =================================================================== --- sys/fs/cd9660/cd9660_vfsops.c +++ sys/fs/cd9660/cd9660_vfsops.c @@ -238,8 +238,8 @@ goto 
out; if (devvp->v_rdev->si_iosize_max != 0) mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max; - if (mp->mnt_iosize_max > MAXPHYS) - mp->mnt_iosize_max = MAXPHYS; + if (mp->mnt_iosize_max > maxphys) + mp->mnt_iosize_max = maxphys; bo = &devvp->v_bufobj; Index: sys/fs/ext2fs/ext2_vfsops.c =================================================================== --- sys/fs/ext2fs/ext2_vfsops.c +++ sys/fs/ext2fs/ext2_vfsops.c @@ -876,8 +876,8 @@ bo->bo_ops = g_vfs_bufops; if (devvp->v_rdev->si_iosize_max != 0) mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max; - if (mp->mnt_iosize_max > MAXPHYS) - mp->mnt_iosize_max = MAXPHYS; + if (mp->mnt_iosize_max > maxphys) + mp->mnt_iosize_max = maxphys; bp = NULL; ump = NULL; @@ -922,7 +922,7 @@ * in ext2fs doesn't have these variables, so we can calculate * them here. */ - e2fs_maxcontig = MAX(1, MAXPHYS / ump->um_e2fs->e2fs_bsize); + e2fs_maxcontig = MAX(1, maxphys / ump->um_e2fs->e2fs_bsize); ump->um_e2fs->e2fs_contigsumsize = MIN(e2fs_maxcontig, EXT2_MAXCONTIG); if (ump->um_e2fs->e2fs_contigsumsize > 0) { size = ump->um_e2fs->e2fs_gcount * sizeof(int32_t); Index: sys/fs/fuse/fuse_vfsops.c =================================================================== --- sys/fs/fuse/fuse_vfsops.c +++ sys/fs/fuse/fuse_vfsops.c @@ -441,7 +441,7 @@ } memset(mp->mnt_stat.f_mntfromname, 0, MNAMELEN); strlcpy(mp->mnt_stat.f_mntfromname, fspec, MNAMELEN); - mp->mnt_iosize_max = MAXPHYS; + mp->mnt_iosize_max = maxphys; /* Now handshaking with daemon */ fuse_internal_send_init(data, td); Index: sys/fs/msdosfs/msdosfs_vfsops.c =================================================================== --- sys/fs/msdosfs/msdosfs_vfsops.c +++ sys/fs/msdosfs/msdosfs_vfsops.c @@ -429,8 +429,8 @@ VOP_UNLOCK(devvp); if (dev->si_iosize_max != 0) mp->mnt_iosize_max = dev->si_iosize_max; - if (mp->mnt_iosize_max > MAXPHYS) - mp->mnt_iosize_max = MAXPHYS; + if (mp->mnt_iosize_max > maxphys) + mp->mnt_iosize_max = maxphys; /* * Read the boot sector of the filesystem, 
and then check the Index: sys/fs/udf/udf_vfsops.c =================================================================== --- sys/fs/udf/udf_vfsops.c +++ sys/fs/udf/udf_vfsops.c @@ -338,8 +338,8 @@ if (devvp->v_rdev->si_iosize_max != 0) mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max; - if (mp->mnt_iosize_max > MAXPHYS) - mp->mnt_iosize_max = MAXPHYS; + if (mp->mnt_iosize_max > maxphys) + mp->mnt_iosize_max = maxphys; /* XXX: should be M_WAITOK */ udfmp = malloc(sizeof(struct udf_mnt), M_UDFMOUNT, Index: sys/geom/cache/g_cache.c =================================================================== --- sys/geom/cache/g_cache.c +++ sys/geom/cache/g_cache.c @@ -494,7 +494,7 @@ /* Block size restrictions. */ bshift = ffs(md->md_bsize) - 1; - if (md->md_bsize == 0 || md->md_bsize > MAXPHYS || + if (md->md_bsize == 0 || md->md_bsize > maxphys || md->md_bsize != 1 << bshift || (md->md_bsize % pp->sectorsize) != 0) { G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name); Index: sys/geom/eli/g_eli_integrity.c =================================================================== --- sys/geom/eli/g_eli_integrity.c +++ sys/geom/eli/g_eli_integrity.c @@ -352,17 +352,17 @@ /* * We write more than what is requested, so we have to be ready to write - * more than MAXPHYS. + * more than maxphys. */ cbp2 = NULL; - if (cbp->bio_length > MAXPHYS) { + if (cbp->bio_length > maxphys) { cbp2 = g_duplicate_bio(bp); - cbp2->bio_length = cbp->bio_length - MAXPHYS; - cbp2->bio_data = cbp->bio_data + MAXPHYS; - cbp2->bio_offset = cbp->bio_offset + MAXPHYS; + cbp2->bio_length = cbp->bio_length - maxphys; + cbp2->bio_data = cbp->bio_data + maxphys; + cbp2->bio_offset = cbp->bio_offset + maxphys; cbp2->bio_to = cp->provider; cbp2->bio_done = g_eli_write_done; - cbp->bio_length = MAXPHYS; + cbp->bio_length = maxphys; } /* * Send encrypted data to the provider. @@ -413,17 +413,17 @@ /* * We read more than what is requested, so we have to be ready to read - * more than MAXPHYS. 
+ * more than maxphys. */ cbp2 = NULL; - if (cbp->bio_length > MAXPHYS) { + if (cbp->bio_length > maxphys) { cbp2 = g_duplicate_bio(bp); - cbp2->bio_length = cbp->bio_length - MAXPHYS; - cbp2->bio_data = cbp->bio_data + MAXPHYS; - cbp2->bio_offset = cbp->bio_offset + MAXPHYS; + cbp2->bio_length = cbp->bio_length - maxphys; + cbp2->bio_data = cbp->bio_data + maxphys; + cbp2->bio_offset = cbp->bio_offset + maxphys; cbp2->bio_to = cp->provider; cbp2->bio_done = g_eli_read_done; - cbp->bio_length = MAXPHYS; + cbp->bio_length = maxphys; } /* * Read encrypted data from provider. Index: sys/geom/geom_dev.c =================================================================== --- sys/geom/geom_dev.c +++ sys/geom/geom_dev.c @@ -377,7 +377,7 @@ } dev = sc->sc_dev; dev->si_flags |= SI_UNMAPPED; - dev->si_iosize_max = MAXPHYS; + dev->si_iosize_max = maxphys; error = init_dumpdev(dev); if (error != 0) printf("%s: init_dumpdev() failed (gp->name=%s, error=%d)\n", @@ -713,14 +713,14 @@ if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) { rep = &zone_args->zone_params.report; -#define MAXENTRIES (MAXPHYS / sizeof(struct disk_zone_rep_entry)) +#define MAXENTRIES (maxphys / sizeof(struct disk_zone_rep_entry)) if (rep->entries_allocated > MAXENTRIES) rep->entries_allocated = MAXENTRIES; alloc_size = rep->entries_allocated * sizeof(struct disk_zone_rep_entry); if (alloc_size != 0) new_entries = g_malloc(alloc_size, - M_WAITOK| M_ZERO); + M_WAITOK | M_ZERO); old_entries = rep->entries; rep->entries = new_entries; } Index: sys/geom/geom_io.c =================================================================== --- sys/geom/geom_io.c +++ sys/geom/geom_io.c @@ -882,7 +882,7 @@ int errorc; KASSERT(length > 0 && length >= cp->provider->sectorsize && - length <= MAXPHYS, ("g_read_data(): invalid length %jd", + length <= maxphys, ("g_read_data(): invalid length %jd", (intmax_t)length)); bp = g_alloc_bio(); @@ -937,7 +937,7 @@ int error; KASSERT(length > 0 && length >= cp->provider->sectorsize && 
- length <= MAXPHYS, ("g_write_data(): invalid length %jd", + length <= maxphys, ("g_write_data(): invalid length %jd", (intmax_t)length)); bp = g_alloc_bio(); Index: sys/geom/journal/g_journal.h =================================================================== --- sys/geom/journal/g_journal.h +++ sys/geom/journal/g_journal.h @@ -215,7 +215,7 @@ #define GJ_RECORD_HEADER_MAGIC "GJRHDR" #define GJ_RECORD_HEADER_NENTRIES (20) #define GJ_RECORD_MAX_SIZE(sc) \ - ((sc)->sc_jprovider->sectorsize + GJ_RECORD_HEADER_NENTRIES * MAXPHYS) + ((sc)->sc_jprovider->sectorsize + GJ_RECORD_HEADER_NENTRIES * maxphys) #define GJ_VALIDATE_OFFSET(offset, sc) do { \ if ((offset) + GJ_RECORD_MAX_SIZE(sc) >= (sc)->sc_jend) { \ (offset) = (sc)->sc_jstart; \ Index: sys/geom/journal/g_journal.c =================================================================== --- sys/geom/journal/g_journal.c +++ sys/geom/journal/g_journal.c @@ -1053,7 +1053,7 @@ continue; } /* Be sure we don't end up with too big bio. */ - if (pbp->bio_length + cbp->bio_length > MAXPHYS) { + if (pbp->bio_length + cbp->bio_length > maxphys) { pbp = cbp; continue; } Index: sys/geom/mirror/g_mirror.c =================================================================== --- sys/geom/mirror/g_mirror.c +++ sys/geom/mirror/g_mirror.c @@ -2070,7 +2070,7 @@ bp->bio_to = disk->d_softc->sc_provider; bp->bio_caller1 = (void *)(uintptr_t)idx; bp->bio_offset = offset; - bp->bio_length = MIN(MAXPHYS, + bp->bio_length = MIN(maxphys, disk->d_softc->sc_mediasize - bp->bio_offset); } @@ -2128,7 +2128,7 @@ bp = g_alloc_bio(); sync->ds_bios[i] = bp; - bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK); + bp->bio_data = malloc(maxphys, M_MIRROR, M_WAITOK); bp->bio_caller1 = (void *)(uintptr_t)i; g_mirror_sync_reinit(disk, bp, sync->ds_offset); sync->ds_offset += bp->bio_length; Index: sys/geom/nop/g_nop.c =================================================================== --- sys/geom/nop/g_nop.c +++ sys/geom/nop/g_nop.c @@ -381,7 +381,7 @@ 
gctl_error(req, "Invalid secsize for provider %s.", pp->name); return (EINVAL); } - if (secsize > MAXPHYS) { + if (secsize > maxphys) { gctl_error(req, "secsize is too big."); return (EINVAL); } Index: sys/geom/part/g_part_apm.c =================================================================== --- sys/geom/part/g_part_apm.c +++ sys/geom/part/g_part_apm.c @@ -582,10 +582,10 @@ baseentry = LIST_NEXT(baseentry, gpe_entry); } - for (index = 0; index < tblsz; index += MAXPHYS / pp->sectorsize) { + for (index = 0; index < tblsz; index += maxphys / pp->sectorsize) { error = g_write_data(cp, (1 + index) * pp->sectorsize, buf + index * pp->sectorsize, - (tblsz - index > MAXPHYS / pp->sectorsize) ? MAXPHYS: + (tblsz - index > maxphys / pp->sectorsize) ? maxphys: (tblsz - index) * pp->sectorsize); if (error) { g_free(buf); Index: sys/geom/part/g_part_gpt.c =================================================================== --- sys/geom/part/g_part_gpt.c +++ sys/geom/part/g_part_gpt.c @@ -552,8 +552,8 @@ tblsz = hdr->hdr_entries * hdr->hdr_entsz; sectors = howmany(tblsz, pp->sectorsize); buf = g_malloc(sectors * pp->sectorsize, M_WAITOK | M_ZERO); - for (idx = 0; idx < sectors; idx += MAXPHYS / pp->sectorsize) { - size = (sectors - idx > MAXPHYS / pp->sectorsize) ? MAXPHYS: + for (idx = 0; idx < sectors; idx += maxphys / pp->sectorsize) { + size = (sectors - idx > maxphys / pp->sectorsize) ? maxphys: (sectors - idx) * pp->sectorsize; p = g_read_data(cp, (table->lba[elt] + idx) * pp->sectorsize, size, &error); @@ -1237,11 +1237,11 @@ crc = crc32(buf, table->hdr->hdr_size); le32enc(buf + 16, crc); - for (index = 0; index < tblsz; index += MAXPHYS / pp->sectorsize) { + for (index = 0; index < tblsz; index += maxphys / pp->sectorsize) { error = g_write_data(cp, (table->lba[GPT_ELT_PRITBL] + index) * pp->sectorsize, buf + (index + 1) * pp->sectorsize, - (tblsz - index > MAXPHYS / pp->sectorsize) ? MAXPHYS: + (tblsz - index > maxphys / pp->sectorsize) ? 
maxphys : (tblsz - index) * pp->sectorsize); if (error) goto out; @@ -1259,11 +1259,11 @@ crc = crc32(buf, table->hdr->hdr_size); le32enc(buf + 16, crc); - for (index = 0; index < tblsz; index += MAXPHYS / pp->sectorsize) { + for (index = 0; index < tblsz; index += maxphys / pp->sectorsize) { error = g_write_data(cp, (table->lba[GPT_ELT_SECTBL] + index) * pp->sectorsize, buf + (index + 1) * pp->sectorsize, - (tblsz - index > MAXPHYS / pp->sectorsize) ? MAXPHYS: + (tblsz - index > maxphys / pp->sectorsize) ? maxphys : (tblsz - index) * pp->sectorsize); if (error) goto out; Index: sys/geom/part/g_part_ldm.c =================================================================== --- sys/geom/part/g_part_ldm.c +++ sys/geom/part/g_part_ldm.c @@ -1020,10 +1020,10 @@ pp = cp->provider; size = howmany(db->dh.last_seq * db->dh.size, pp->sectorsize); size -= 1; /* one sector takes vmdb header */ - for (n = 0; n < size; n += MAXPHYS / pp->sectorsize) { + for (n = 0; n < size; n += maxphys / pp->sectorsize) { offset = db->ph.db_offset + db->th.conf_offset + n + 1; - sectors = (size - n) > (MAXPHYS / pp->sectorsize) ? - MAXPHYS / pp->sectorsize: size - n; + sectors = (size - n) > (maxphys / pp->sectorsize) ? 
+ maxphys / pp->sectorsize : size - n; /* read VBLKs */ buf = g_read_data(cp, offset * pp->sectorsize, sectors * pp->sectorsize, &error); Index: sys/geom/raid/md_ddf.c =================================================================== --- sys/geom/raid/md_ddf.c +++ sys/geom/raid/md_ddf.c @@ -1160,12 +1160,12 @@ (GET16(meta, hdr->Configuration_Record_Length) * ss - 512) / 12)); } - if (GET32(meta, hdr->cd_length) * ss >= MAXPHYS || - GET32(meta, hdr->pdr_length) * ss >= MAXPHYS || - GET32(meta, hdr->vdr_length) * ss >= MAXPHYS || - GET32(meta, hdr->cr_length) * ss >= MAXPHYS || - GET32(meta, hdr->pdd_length) * ss >= MAXPHYS || - GET32(meta, hdr->bbmlog_length) * ss >= MAXPHYS) { + if (GET32(meta, hdr->cd_length) * ss >= maxphys || + GET32(meta, hdr->pdr_length) * ss >= maxphys || + GET32(meta, hdr->vdr_length) * ss >= maxphys || + GET32(meta, hdr->cr_length) * ss >= maxphys || + GET32(meta, hdr->pdd_length) * ss >= maxphys || + GET32(meta, hdr->bbmlog_length) * ss >= maxphys) { G_RAID_DEBUG(1, "%s: Blocksize is too big.", pp->name); goto hdrerror; } Index: sys/geom/raid/md_promise.c =================================================================== --- sys/geom/raid/md_promise.c +++ sys/geom/raid/md_promise.c @@ -344,7 +344,7 @@ pp = cp->provider; subdisks = 0; - if (pp->sectorsize * 4 > MAXPHYS) { + if (pp->sectorsize * 4 > maxphys) { G_RAID_DEBUG(1, "%s: Blocksize is too big.", pp->name); return (subdisks); } Index: sys/geom/raid3/g_raid3.c =================================================================== --- sys/geom/raid3/g_raid3.c +++ sys/geom/raid3/g_raid3.c @@ -1723,7 +1723,7 @@ g_reset_bio(bp); bp->bio_cmd = BIO_READ; bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1); - bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); + bp->bio_length = MIN(maxphys, sc->sc_mediasize - bp->bio_offset); sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1); bp->bio_done = g_raid3_sync_done; bp->bio_data = data; @@ -1752,7 +1752,7 @@ if (boffset < 
moffset) moffset = boffset; } - if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) { + if (sync->ds_offset_done + maxphys * 100 < moffset) { /* Update offset_done on every 100 blocks. */ sync->ds_offset_done = moffset; g_raid3_update_metadata(disk); @@ -2241,10 +2241,10 @@ disk->d_sync.ds_bios[n] = bp; bp->bio_parent = NULL; bp->bio_cmd = BIO_READ; - bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK); + bp->bio_data = malloc(maxphys, M_RAID3, M_WAITOK); bp->bio_cflags = 0; bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1); - bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); + bp->bio_length = MIN(maxphys, sc->sc_mediasize - bp->bio_offset); disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1); bp->bio_done = g_raid3_sync_done; bp->bio_from = disk->d_sync.ds_consumer; @@ -2909,7 +2909,7 @@ cp->provider->name); return (error); } - if (md->md_sectorsize > MAXPHYS) { + if (md->md_sectorsize > maxphys) { G_RAID3_DEBUG(0, "The blocksize is too big."); return (EINVAL); } Index: sys/geom/shsec/g_shsec.c =================================================================== --- sys/geom/shsec/g_shsec.c +++ sys/geom/shsec/g_shsec.c @@ -73,11 +73,12 @@ SYSCTL_DECL(_kern_geom); static SYSCTL_NODE(_kern_geom, OID_AUTO, shsec, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "GEOM_SHSEC stuff"); -static u_int g_shsec_debug = 0; +static u_int g_shsec_debug; SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, debug, CTLFLAG_RWTUN, &g_shsec_debug, 0, "Debug level"); -static u_int g_shsec_maxmem = MAXPHYS * 100; -SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, maxmem, CTLFLAG_RDTUN, &g_shsec_maxmem, +static u_long g_shsec_maxmem; +SYSCTL_ULONG(_kern_geom_shsec, OID_AUTO, maxmem, + CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &g_shsec_maxmem, 0, "Maximum memory that can be allocated for I/O (in bytes)"); static u_int g_shsec_alloc_failed = 0; SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, alloc_failed, CTLFLAG_RD, @@ -113,10 +114,12 @@ g_shsec_init(struct g_class *mp __unused) { - g_shsec_zone = 
uma_zcreate("g_shsec_zone", MAXPHYS, NULL, NULL, NULL, + g_shsec_maxmem = maxphys * 100; + TUNABLE_ULONG_FETCH("kern.geom.shsec.maxmem", &g_shsec_maxmem); + g_shsec_zone = uma_zcreate("g_shsec_zone", maxphys, NULL, NULL, NULL, NULL, 0, 0); - g_shsec_maxmem -= g_shsec_maxmem % MAXPHYS; - uma_zone_set_max(g_shsec_zone, g_shsec_maxmem / MAXPHYS); + g_shsec_maxmem -= g_shsec_maxmem % maxphys; + uma_zone_set_max(g_shsec_zone, g_shsec_maxmem / maxphys); } static void Index: sys/geom/stripe/g_stripe.c =================================================================== --- sys/geom/stripe/g_stripe.c +++ sys/geom/stripe/g_stripe.c @@ -92,9 +92,10 @@ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, NULL, 0, g_sysctl_stripe_fast, "I", "Fast, but memory-consuming, mode"); -static u_int g_stripe_maxmem = MAXPHYS * 100; -SYSCTL_UINT(_kern_geom_stripe, OID_AUTO, maxmem, CTLFLAG_RDTUN, &g_stripe_maxmem, - 0, "Maximum memory that can be allocated in \"fast\" mode (in bytes)"); +static u_long g_stripe_maxmem; +SYSCTL_ULONG(_kern_geom_stripe, OID_AUTO, maxmem, + CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &g_stripe_maxmem, 0, + "Maximum memory that can be allocated in \"fast\" mode (in bytes)"); static u_int g_stripe_fast_failed = 0; SYSCTL_UINT(_kern_geom_stripe, OID_AUTO, fast_failed, CTLFLAG_RD, &g_stripe_fast_failed, 0, "How many times \"fast\" mode failed"); @@ -129,10 +130,12 @@ g_stripe_init(struct g_class *mp __unused) { - g_stripe_zone = uma_zcreate("g_stripe_zone", MAXPHYS, NULL, NULL, + g_stripe_maxmem = maxphys * 100; + TUNABLE_ULONG_FETCH("kern.geom.stripe.maxmem", &g_stripe_maxmem); + g_stripe_zone = uma_zcreate("g_stripe_zone", maxphys, NULL, NULL, NULL, NULL, 0, 0); - g_stripe_maxmem -= g_stripe_maxmem % MAXPHYS; - uma_zone_set_max(g_stripe_zone, g_stripe_maxmem / MAXPHYS); + g_stripe_maxmem -= g_stripe_maxmem % maxphys; + uma_zone_set_max(g_stripe_zone, g_stripe_maxmem / maxphys); } static void @@ -633,7 +636,7 @@ * Do use "fast" mode when: * 1. "Fast" mode is ON. 
* and - * 2. Request size is less than or equal to MAXPHYS, + * 2. Request size is less than or equal to maxphys, * which should always be true. * and * 3. Request size is bigger than stripesize * ndisks. If it isn't, @@ -644,7 +647,7 @@ * and * 5. It is not a BIO_DELETE. */ - if (g_stripe_fast && bp->bio_length <= MAXPHYS && + if (g_stripe_fast && bp->bio_length <= maxphys && bp->bio_length >= stripesize * sc->sc_ndisks && (bp->bio_flags & BIO_UNMAPPED) == 0 && bp->bio_cmd != BIO_DELETE) { Index: sys/geom/uzip/g_uzip.c =================================================================== --- sys/geom/uzip/g_uzip.c +++ sys/geom/uzip/g_uzip.c @@ -136,7 +136,7 @@ /* * Maximum allowed valid block size (to prevent foot-shooting) */ -#define MAX_BLKSZ (MAXPHYS) +#define MAX_BLKSZ (maxphys) static char CLOOP_MAGIC_START[] = "#!/bin/sh\n"; @@ -292,7 +292,7 @@ bp2->bio_offset = TOFF_2_BOFF(sc, pp, start_blk); while (1) { bp2->bio_length = TLEN_2_BLEN(sc, pp, bp2, end_blk - 1); - if (bp2->bio_length <= MAXPHYS) { + if (bp2->bio_length <= maxphys) { break; } if (end_blk == (start_blk + 1)) { @@ -791,7 +791,7 @@ goto e4; } if (sc->blksz > MAX_BLKSZ) { - printf("%s: block size (%u) should not be larger than %d.\n", + printf("%s: block size (%u) should not be larger than %lu.\n", gp->name, sc->blksz, MAX_BLKSZ); } total_offsets = sc->nblocks + 1; Index: sys/geom/vinum/geom_vinum_var.h =================================================================== --- sys/geom/vinum/geom_vinum_var.h +++ sys/geom/vinum/geom_vinum_var.h @@ -106,7 +106,7 @@ /* Synchronization/initialization request sizes. */ #define GV_MIN_SYNCSIZE 512 -#define GV_MAX_SYNCSIZE MAXPHYS +#define GV_MAX_SYNCSIZE maxphys #define GV_DFLT_SYNCSIZE 65536 /* Flags for BIOs, as they are processed within vinum. 
*/ Index: sys/geom/virstor/g_virstor.h =================================================================== --- sys/geom/virstor/g_virstor.h +++ sys/geom/virstor/g_virstor.h @@ -41,8 +41,8 @@ }; #define VIRSTOR_MAP_ENTRY_SIZE (sizeof(struct virstor_map_entry)) -#define VIRSTOR_MAP_BLOCK_ENTRIES (MAXPHYS / VIRSTOR_MAP_ENTRY_SIZE) -/* Struct size is guarded by CTASSERT in main source */ +#define VIRSTOR_MAP_BLOCK_ENTRIES (maxphys / VIRSTOR_MAP_ENTRY_SIZE) +/* Struct size is guarded by MPASS in main source */ #ifdef _KERNEL Index: sys/geom/virstor/g_virstor.c =================================================================== --- sys/geom/virstor/g_virstor.c +++ sys/geom/virstor/g_virstor.c @@ -148,8 +148,8 @@ { /* Catch map struct size mismatch at compile time; Map entries must - * fit into MAXPHYS exactly, with no wasted space. */ - CTASSERT(VIRSTOR_MAP_BLOCK_ENTRIES*VIRSTOR_MAP_ENTRY_SIZE == MAXPHYS); + * fit into maxphys exactly, with no wasted space. */ + MPASS(VIRSTOR_MAP_BLOCK_ENTRIES * VIRSTOR_MAP_ENTRY_SIZE == maxphys); /* Init UMA zones, TAILQ's, other global vars */ } @@ -1245,7 +1245,7 @@ struct g_virstor_map_entry *mapbuf; size_t bs; - bs = MIN(MAXPHYS, sc->map_size - count); + bs = MIN(maxphys, sc->map_size - count); if (bs % sc->sectorsize != 0) { /* Check for alignment errors */ bs = rounddown(bs, sc->sectorsize); Index: sys/kern/kern_mib.c =================================================================== --- sys/kern/kern_mib.c +++ sys/kern/kern_mib.c @@ -146,8 +146,29 @@ SYSCTL_STRING(_kern, KERN_BOOTFILE, bootfile, CTLFLAG_RW | CTLFLAG_MPSAFE, kernelname, sizeof kernelname, "Name of kernel file booted"); -SYSCTL_INT(_kern, KERN_MAXPHYS, maxphys, CTLFLAG_RD | CTLFLAG_CAPRD, - SYSCTL_NULL_INT_PTR, MAXPHYS, "Maximum block I/O access size"); +#ifdef COMPAT_FREEBSD12 +static int +sysctl_maxphys(SYSCTL_HANDLER_ARGS) +{ + u_long lvalue; + int ivalue; + + lvalue = maxphys; + if (sizeof(int) == sizeof(u_long) || req->oldlen >= sizeof(u_long)) + return 
(sysctl_handle_long(oidp, &lvalue, 0, req)); + if (lvalue > INT_MAX) + return (sysctl_handle_long(oidp, &lvalue, 0, req)); + ivalue = lvalue; + return (sysctl_handle_int(oidp, &ivalue, 0, req)); +} +SYSCTL_PROC(_kern, KERN_MAXPHYS, maxphys, CTLTYPE_LONG | CTLFLAG_RDTUN | + CTLFLAG_NOFETCH | CTLFLAG_CAPRD | CTLFLAG_MPSAFE, + NULL, 0, sysctl_maxphys, "UL", "Maximum block I/O access size"); +#else +SYSCTL_ULONG(_kern, KERN_MAXPHYS, maxphys, + CTLFLAG_RDTUN | CTLFLAG_NOFETCH | CTLFLAG_CAPRD, + &maxphys, 0, "Maximum block I/O access size"); +#endif SYSCTL_INT(_hw, HW_NCPU, ncpu, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_ncpus, 0, "Number of active CPUs"); Index: sys/kern/kern_physio.c =================================================================== --- sys/kern/kern_physio.c +++ sys/kern/kern_physio.c @@ -69,7 +69,7 @@ * need to reject any requests that will not fit into one buffer. */ if (dev->si_flags & SI_NOSPLIT && - (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > MAXPHYS || + (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > maxphys || uio->uio_iovcnt > 1)) { /* * Tell the user why his I/O was rejected. 
@@ -78,10 +78,10 @@ uprintf("%s: request size=%zd > si_iosize_max=%d; " "cannot split request\n", devtoname(dev), uio->uio_resid, dev->si_iosize_max); - if (uio->uio_resid > MAXPHYS) - uprintf("%s: request size=%zd > MAXPHYS=%d; " + if (uio->uio_resid > maxphys) + uprintf("%s: request size=%zd > maxphys=%lu; " "cannot split request\n", devtoname(dev), - uio->uio_resid, MAXPHYS); + uio->uio_resid, maxphys); if (uio->uio_iovcnt > 1) uprintf("%s: request vectors=%d > 1; " "cannot split request\n", devtoname(dev), @@ -101,12 +101,13 @@ pages = NULL; } else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) { pbuf = NULL; - maxpages = btoc(MIN(uio->uio_resid, MAXPHYS)) + 1; + maxpages = btoc(MIN(uio->uio_resid, maxphys)) + 1; pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK); } else { pbuf = uma_zalloc(pbuf_zone, M_WAITOK); + MPASS((pbuf->b_flags & B_MAXPHYS) != 0); sa = pbuf->b_data; - maxpages = btoc(MAXPHYS); + maxpages = btoc(maxphys); pages = pbuf->b_pages; } prot = VM_PROT_READ; @@ -144,13 +145,13 @@ bp->bio_length = uio->uio_iov[i].iov_len; if (bp->bio_length > dev->si_iosize_max) bp->bio_length = dev->si_iosize_max; - if (bp->bio_length > MAXPHYS) - bp->bio_length = MAXPHYS; + if (bp->bio_length > maxphys) + bp->bio_length = maxphys; /* * Make sure the pbuf can map the request. - * The pbuf has kvasize = MAXPHYS, so a request - * larger than MAXPHYS - PAGE_SIZE must be + * The pbuf has kvasize = maxphys, so a request + * larger than maxphys - PAGE_SIZE must be * page aligned or it will be fragmented. */ poff = (vm_offset_t)base & PAGE_MASK; Index: sys/kern/kern_sendfile.c =================================================================== --- sys/kern/kern_sendfile.c +++ sys/kern/kern_sendfile.c @@ -885,7 +885,7 @@ * do any heuristics and use exactly the value supplied by * application. Otherwise, we allow readahead up to "rem". * If application wants more, let it be, but there is no - * reason to go above MAXPHYS. 
Also check against "obj_size", + * reason to go above maxphys. Also check against "obj_size", * since vm_pager_has_page() can hint beyond EOF. */ if (flags & SF_USER_READAHEAD) { @@ -895,7 +895,7 @@ npages; rhpages += SF_READAHEAD(flags); } - rhpages = min(howmany(MAXPHYS, PAGE_SIZE), rhpages); + rhpages = min(howmany(maxphys, PAGE_SIZE), rhpages); rhpages = min(howmany(obj_size - trunc_page(off), PAGE_SIZE) - npages, rhpages); Index: sys/kern/subr_param.c =================================================================== --- sys/kern/subr_param.c +++ sys/kern/subr_param.c @@ -41,6 +41,7 @@ #include "opt_param.h" #include "opt_msgbuf.h" +#include "opt_maxphys.h" #include "opt_maxusers.h" #include @@ -99,9 +100,10 @@ int ngroups_max; /* max # groups per process */ int nswbuf; pid_t pid_max = PID_MAX; -long maxswzone; /* max swmeta KVA storage */ -long maxbcache; /* max buffer cache KVA storage */ -long maxpipekva; /* Limit on pipe KVA */ +u_long maxswzone; /* max swmeta KVA storage */ +u_long maxbcache; /* max buffer cache KVA storage */ +u_long maxpipekva; /* Limit on pipe KVA */ +u_long maxphys; int vm_guest = VM_GUEST_NO; /* Running as virtual machine guest? */ u_long maxtsiz; /* max text size */ u_long dfldsiz; /* initial data size limit */ @@ -289,6 +291,8 @@ nbuf = NBUF; TUNABLE_INT_FETCH("kern.nbuf", &nbuf); TUNABLE_INT_FETCH("kern.bio_transient_maxcnt", &bio_transient_maxcnt); + maxphys = MAXPHYS; + TUNABLE_ULONG_FETCH("kern.maxphys", &maxphys); /* * Physical buffers are pre-allocated buffers (struct buf) that @@ -300,7 +304,7 @@ * The default for maxpipekva is min(1/64 of the kernel address space, * max(1/64 of main memory, 512KB)). See sys_pipe.c for more details. 
*/ - maxpipekva = (physpages / 64) * PAGE_SIZE; + maxpipekva = ptoa(physpages / 64); TUNABLE_LONG_FETCH("kern.ipc.maxpipekva", &maxpipekva); if (maxpipekva < 512 * 1024) maxpipekva = 512 * 1024; Index: sys/kern/vfs_aio.c =================================================================== --- sys/kern/vfs_aio.c +++ sys/kern/vfs_aio.c @@ -1252,14 +1252,16 @@ ki = p->p_aioinfo; poff = (vm_offset_t)cb->aio_buf & PAGE_MASK; if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) { - if (cb->aio_nbytes > MAXPHYS) { + if (cb->aio_nbytes > maxphys) { error = -1; goto unref; } pbuf = NULL; + job->pages = malloc(sizeof(vm_page_t) * atop(round_page( + cb->aio_nbytes)) + 1, M_TEMP, M_WAITOK | M_ZERO); } else { - if (cb->aio_nbytes > MAXPHYS - poff) { + if (cb->aio_nbytes > maxphys - poff) { error = -1; goto unref; } @@ -1273,6 +1275,7 @@ AIO_LOCK(ki); ki->kaio_buffer_count++; AIO_UNLOCK(ki); + job->pages = pbuf->b_pages; } job->bp = bp = g_alloc_bio(); @@ -1320,6 +1323,8 @@ AIO_UNLOCK(ki); uma_zfree(pbuf_zone, pbuf); job->pbuf = NULL; + } else { + free(job->pages, M_TEMP); } g_destroy_bio(bp); job->bp = NULL; @@ -2342,7 +2347,8 @@ /* Release mapping into kernel space. 
*/ userp = job->userproc; ki = userp->p_aioinfo; - if (job->pbuf) { + vm_page_unhold_pages(job->pages, job->npages); + if (job->pbuf != NULL) { pmap_qremove((vm_offset_t)job->pbuf->b_data, job->npages); uma_zfree(pbuf_zone, job->pbuf); job->pbuf = NULL; @@ -2350,9 +2356,10 @@ AIO_LOCK(ki); ki->kaio_buffer_count--; AIO_UNLOCK(ki); - } else + } else { + free(job->pages, M_TEMP); atomic_subtract_int(&num_unmapped_aio, 1); - vm_page_unhold_pages(job->pages, job->npages); + } bp = job->bp; job->bp = NULL; Index: sys/kern/vfs_bio.c =================================================================== --- sys/kern/vfs_bio.c +++ sys/kern/vfs_bio.c @@ -147,8 +147,14 @@ #define BD_RUN_UNLOCK(bd) mtx_unlock(BD_RUN_LOCKPTR((bd))) #define BD_DOMAIN(bd) (bd - bdomain) -static struct buf *buf; /* buffer header pool */ -extern struct buf *swbuf; /* Swap buffer header pool. */ +static char *buf; /* buffer header pool */ +static struct buf * +nbufp(unsigned i) +{ + return ((struct buf *)(buf + (sizeof(struct buf) + + sizeof(vm_page_t) * atop(maxbcachebuf)) * i)); +} + caddr_t __read_mostly unmapped_buf; /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */ @@ -994,8 +1000,8 @@ maxbcachebuf = i; if (maxbcachebuf < MAXBSIZE) maxbcachebuf = MAXBSIZE; - if (maxbcachebuf > MAXPHYS) - maxbcachebuf = MAXPHYS; + if (maxbcachebuf > maxphys) + maxbcachebuf = maxphys; if (bootverbose != 0 && maxbcachebuf != MAXBCACHEBUF) printf("maxbcachebuf=%d\n", maxbcachebuf); } @@ -1113,10 +1119,10 @@ biotmap_sz = buf_sz / TRANSIENT_DENOM; buf_sz -= biotmap_sz; } - if (biotmap_sz / INT_MAX > MAXPHYS) + if (biotmap_sz / INT_MAX > maxphys) bio_transient_maxcnt = INT_MAX; else - bio_transient_maxcnt = biotmap_sz / MAXPHYS; + bio_transient_maxcnt = biotmap_sz / maxphys; /* * Artificially limit to 1024 simultaneous in-flight I/Os * using the transient mapping. 
@@ -1136,10 +1142,11 @@ /* * Reserve space for the buffer cache buffers */ - buf = (void *)v; - v = (caddr_t)(buf + nbuf); + buf = (char *)v; + v = (caddr_t)buf + (sizeof(struct buf) + sizeof(vm_page_t) * + atop(maxbcachebuf)) * nbuf; - return(v); + return (v); } /* Initialize the buffer subsystem. Called before use of any buffers. */ @@ -1157,12 +1164,12 @@ mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF); mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF); - unmapped_buf = (caddr_t)kva_alloc(MAXPHYS); + unmapped_buf = (caddr_t)kva_alloc(maxphys); /* finally, initialize each buffer header and stick on empty q */ for (i = 0; i < nbuf; i++) { - bp = &buf[i]; - bzero(bp, sizeof *bp); + bp = nbufp(i); + bzero(bp, sizeof(*bp) + sizeof(vm_page_t) * atop(maxbcachebuf)); bp->b_flags = B_INVAL; bp->b_rcred = NOCRED; bp->b_wcred = NOCRED; @@ -1246,7 +1253,8 @@ /* Setup the kva and free list allocators. */ vmem_set_reclaim(buffer_arena, bufkva_reclaim); - buf_zone = uma_zcache_create("buf free cache", sizeof(struct buf), + buf_zone = uma_zcache_create("buf free cache", + sizeof(struct buf) + sizeof(vm_page_t) * atop(maxbcachebuf), NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0); /* @@ -1295,7 +1303,7 @@ KASSERT(bp->b_data != unmapped_buf, ("mapped buf: b_data was not updated %p", bp)); KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf + - MAXPHYS, ("b_data + b_offset unmapped %p", bp)); + maxphys, ("b_data + b_offset unmapped %p", bp)); } static inline void @@ -1330,7 +1338,7 @@ { static int first_buf_printf = 1; struct buf *bp; - int iter, nbusy, pbusy; + int i, iter, nbusy, pbusy; #ifndef PREEMPTION int subiter; #endif @@ -1348,9 +1356,11 @@ */ for (iter = pbusy = 0; iter < 20; iter++) { nbusy = 0; - for (bp = &buf[nbuf]; --bp >= buf; ) + for (i = nbuf - 1; i >= 0; i--) { + bp = nbufp(i); if (isbufbusy(bp)) nbusy++; + } if (nbusy == 0) { if (first_buf_printf) printf("All buffers synced."); @@ -1391,7 +1401,8 @@ * a fsck if we're just a 
client of a wedged NFS server */ nbusy = 0; - for (bp = &buf[nbuf]; --bp >= buf; ) { + for (i = nbuf - 1; i >= 0; i--) { + bp = nbufp(i); if (isbufbusy(bp)) { #if 0 /* XXX: This is bogus. We should probably have a BO_REMOTE flag instead */ @@ -1571,6 +1582,7 @@ buf_deallocate(bp); bufkva_free(bp); atomic_add_int(&bufdomain(bp)->bd_freebuffers, 1); + MPASS((bp->b_flags & B_MAXPHYS) == 0); BUF_UNLOCK(bp); uma_zfree(buf_zone, bp); } @@ -1674,6 +1686,7 @@ ("bp: %p still has %d vm pages\n", bp, bp->b_npages)); KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp)); KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp)); + MPASS((bp->b_flags & B_MAXPHYS) == 0); bp->b_domain = BD_DOMAIN(bd); bp->b_flags = 0; @@ -2018,6 +2031,9 @@ KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0, ("Invalid gbflags 0x%x in %s", gbflags, __func__)); + MPASS((bp->b_flags & B_MAXPHYS) == 0); + KASSERT(maxsize <= maxbcachebuf, + ("bufkva_alloc kva too large %d %u", maxsize, maxbcachebuf)); bufkva_free(bp); @@ -3036,6 +3052,10 @@ */ obj = bp->b_bufobj->bo_object; if (bp->b_npages < desiredpages) { + KASSERT(desiredpages <= atop(maxbcachebuf), + ("vfs_vmio_extend past maxbcachebuf %p %d %u", + bp, desiredpages, maxbcachebuf)); + /* * We must allocate system pages since blocking * here could interfere with paging I/O, no @@ -3163,7 +3183,7 @@ (vp->v_mount != 0) && /* Only on nodes that have the size info */ (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { size = vp->v_mount->mnt_stat.f_iosize; - maxcl = MAXPHYS / size; + maxcl = maxphys / size; BO_RLOCK(bo); for (i = 1; i < maxcl; i++) @@ -4853,6 +4873,10 @@ to = round_page(to); from = round_page(from); index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; + MPASS((bp->b_flags & B_MAXPHYS) == 0); + KASSERT(to - from <= maxbcachebuf, + ("vm_hold_load_pages too large %p %#jx %#jx %u", + bp, (uintmax_t)from, (uintmax_t)to, maxbcachebuf)); for (pg = from; pg < to; pg += PAGE_SIZE, index++) { /* 
@@ -4912,12 +4936,12 @@ vm_prot_t prot; int pidx; + MPASS((bp->b_flags & B_MAXPHYS) != 0); prot = VM_PROT_READ; if (bp->b_iocmd == BIO_READ) prot |= VM_PROT_WRITE; /* Less backwards than it looks */ if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, - (vm_offset_t)uaddr, len, prot, bp->b_pages, - btoc(MAXPHYS))) < 0) + (vm_offset_t)uaddr, len, prot, bp->b_pages, btoc(maxphys))) < 0) return (-1); bp->b_bufsize = len; bp->b_npages = pidx; @@ -5398,19 +5422,23 @@ db_printf("\n"); cnt = 0; total = 0; - for (j = 0; j < nbuf; j++) - if (buf[j].b_domain == i && BUF_ISLOCKED(&buf[j])) { + for (j = 0; j < nbuf; j++) { + bp = nbufp(j); + if (bp->b_domain == i && BUF_ISLOCKED(bp)) { cnt++; - total += buf[j].b_bufsize; + total += bp->b_bufsize; } + } db_printf("\tLocked buffers: %d space %ld\n", cnt, total); cnt = 0; total = 0; - for (j = 0; j < nbuf; j++) - if (buf[j].b_domain == i) { + for (j = 0; j < nbuf; j++) { + bp = nbufp(j); + if (bp->b_domain == i) { cnt++; - total += buf[j].b_bufsize; + total += bp->b_bufsize; } + } db_printf("\tTotal buffers: %d space %ld\n", cnt, total); } } @@ -5421,7 +5449,7 @@ int i; for (i = 0; i < nbuf; i++) { - bp = &buf[i]; + bp = nbufp(i); if (BUF_ISLOCKED(bp)) { db_show_buffer((uintptr_t)bp, 1, 0, NULL); db_printf("\n"); @@ -5464,7 +5492,7 @@ } for (i = 0; i < nbuf; i++) { - bp = &buf[i]; + bp = nbufp(i); if (bp->b_qindex == QUEUE_EMPTY) nfree++; else Index: sys/kern/vfs_cluster.c =================================================================== --- sys/kern/vfs_cluster.c +++ sys/kern/vfs_cluster.c @@ -386,6 +386,7 @@ bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT); if (bp == NULL) return tbp; + MPASS((bp->b_flags & B_MAXPHYS) != 0); /* * We are synthesizing a buffer out of vm_page_t's, but @@ -871,6 +872,7 @@ --len; continue; } + MPASS((bp->b_flags & B_MAXPHYS) != 0); /* * We got a pbuf to make the cluster in. 
Index: sys/kern/vfs_default.c =================================================================== --- sys/kern/vfs_default.c +++ sys/kern/vfs_default.c @@ -974,8 +974,8 @@ iosize = vap->va_blocksize; if (iosize == 0) iosize = BLKDEV_IOSIZE; - if (iosize > MAXPHYS) - iosize = MAXPHYS; + if (iosize > maxphys) + iosize = maxphys; buf = malloc(iosize, M_TEMP, M_WAITOK); #ifdef __notyet__ Index: sys/mips/ingenic/jz4780_mmc.c =================================================================== --- sys/mips/ingenic/jz4780_mmc.c +++ sys/mips/ingenic/jz4780_mmc.c @@ -58,7 +58,7 @@ #define JZ_MSC_IRQRES 1 #define JZ_MSC_RESSZ 2 #define JZ_MSC_DMA_SEGS 128 -#define JZ_MSC_DMA_MAX_SIZE MAXPHYS +#define JZ_MSC_DMA_MAX_SIZE maxphys #define JZ_MSC_INT_ERR_BITS (JZ_INT_CRC_RES_ERR | JZ_INT_CRC_READ_ERR | \ JZ_INT_CRC_WRITE_ERR | JZ_INT_TIMEOUT_RES | \ Index: sys/net/if.c =================================================================== --- sys/net/if.c +++ sys/net/if.c @@ -3162,8 +3162,8 @@ struct sbuf *sb; int error, full = 0, valid_len, max_len; - /* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */ - max_len = MAXPHYS - 1; + /* Limit initial buffer size to maxphys to avoid DoS from userspace. */ + max_len = maxphys - 1; /* Prevent hostile input from being able to crash the system */ if (ifc->ifc_len <= 0) Index: sys/powerpc/mambo/mambo_disk.c =================================================================== --- sys/powerpc/mambo/mambo_disk.c +++ sys/powerpc/mambo/mambo_disk.c @@ -115,7 +115,7 @@ d->d_strategy = mambodisk_strategy; d->d_name = "mambodisk"; d->d_drv1 = sc; - d->d_maxsize = MAXPHYS; /* Maybe ask bridge? */ + d->d_maxsize = maxphys; /* Maybe ask bridge? 
*/ d->d_sectorsize = 512; sc->maxblocks = mambocall(MAMBO_DISK_INFO,MAMBO_INFO_BLKSZ,d->d_unit) Index: sys/powerpc/mpc85xx/fsl_sata.c =================================================================== --- sys/powerpc/mpc85xx/fsl_sata.c +++ sys/powerpc/mpc85xx/fsl_sata.c @@ -1870,7 +1870,7 @@ cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; - cpi->maxio = MAXPHYS; + cpi->maxio = maxphys; cpi->ccb_h.status = CAM_REQ_CMP; break; } Index: sys/sys/aio.h =================================================================== --- sys/sys/aio.h +++ sys/sys/aio.h @@ -140,8 +140,8 @@ struct { /* BIO backend */ struct bio *bp; /* (*) BIO pointer */ struct buf *pbuf; /* (*) buffer pointer */ - struct vm_page *pages[btoc(MAXPHYS)+1]; /* (*) */ int npages; /* (*) number of pages */ + struct vm_page **pages; /* (*) */ }; struct { /* fsync() requests */ int pending; /* (a) number of pending I/O */ Index: sys/sys/buf.h =================================================================== --- sys/sys/buf.h +++ sys/sys/buf.h @@ -141,7 +141,6 @@ TAILQ_HEAD(cluster_list_head, buf) cluster_head; TAILQ_ENTRY(buf) cluster_entry; } b_cluster; - struct vm_page *b_pages[btoc(MAXPHYS)]; int b_npages; struct workhead b_dep; /* (D) List of filesystem dependencies. */ void *b_fsprivate1; @@ -156,6 +155,7 @@ #elif defined(BUF_TRACKING) const char *b_io_tracking; #endif + struct vm_page *b_pages[]; }; #define b_object b_bufobj->bo_object @@ -234,7 +234,7 @@ #define B_INVALONERR 0x00040000 /* Invalidate on write error. */ #define B_00080000 0x00080000 /* Available flag. */ #define B_00100000 0x00100000 /* Available flag. */ -#define B_00200000 0x00200000 /* Available flag. */ +#define B_MAXPHYS 0x00200000 /* nitems(b_pages[]) = atop(MAXPHYS). */ #define B_RELBUF 0x00400000 /* Release VMIO buffer. */ #define B_FS_FLAG1 0x00800000 /* Available flag for FS use. */ #define B_NOCOPY 0x01000000 /* Don't copy-on-write this buf. 
*/ @@ -247,7 +247,7 @@ #define B_REMFREE 0x80000000 /* Delayed bremfree */ #define PRINT_BUF_FLAGS "\20\40remfree\37cluster\36vmio\35ram\34managed" \ - "\33paging\32infreecnt\31nocopy\30b23\27relbuf\26b21\25b20" \ + "\33paging\32infreecnt\31nocopy\30b23\27relbuf\26maxphys\25b20" \ "\24b19\23invalonerr\22clusterok\21malloc\20nocache\17b14\16inval" \ "\15reuse\14noreuse\13eintr\12done\11b8\10delwri" \ "\7validsuspwrt\6cache\5deferred\4direct\3async\2needcommit\1age" @@ -496,8 +496,8 @@ #ifdef _KERNEL extern int nbuf; /* The number of buffer headers */ -extern long maxswzone; /* Max KVA for swap structures */ -extern long maxbcache; /* Max KVA for buffer cache */ +extern u_long maxswzone; /* Max KVA for swap structures */ +extern u_long maxbcache; /* Max KVA for buffer cache */ extern int maxbcachebuf; /* Max buffer cache block size */ extern long runningbufspace; extern long hibufspace; Index: sys/sys/param.h =================================================================== --- sys/sys/param.h +++ sys/sys/param.h @@ -160,7 +160,7 @@ #define DFLTPHYS (64 * 1024) /* default max raw I/O transfer size */ #endif #ifndef MAXPHYS -#define MAXPHYS (128 * 1024) /* max raw I/O transfer size */ +#define MAXPHYS (1024 * 1024) /* max raw I/O transfer size */ #endif #ifndef MAXDUMPPGS #define MAXDUMPPGS (DFLTPHYS/PAGE_SIZE) Index: sys/sys/systm.h =================================================================== --- sys/sys/systm.h +++ sys/sys/systm.h @@ -74,6 +74,8 @@ extern int ngroups_max; /* max # of supplemental groups */ extern int vm_guest; /* Running as virtual machine guest? */ +extern u_long maxphys; + /* * Detected virtual machine guest types. 
The intention is to expand * and/or add to the VM_GUEST_VM type if specific VM functionality is Index: sys/ufs/ffs/ffs_vfsops.c =================================================================== --- sys/ufs/ffs/ffs_vfsops.c +++ sys/ufs/ffs/ffs_vfsops.c @@ -1055,8 +1055,8 @@ BO_UNLOCK(&odevvp->v_bufobj); if (dev->si_iosize_max != 0) mp->mnt_iosize_max = dev->si_iosize_max; - if (mp->mnt_iosize_max > MAXPHYS) - mp->mnt_iosize_max = MAXPHYS; + if (mp->mnt_iosize_max > maxphys) + mp->mnt_iosize_max = maxphys; if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) { error = EINVAL; vfs_mount_error(mp, Index: sys/vm/swap_pager.c =================================================================== --- sys/vm/swap_pager.c +++ sys/vm/swap_pager.c @@ -586,7 +586,7 @@ * but it isn't very efficient). * * The nsw_cluster_max is constrained by the bp->b_pages[] - * array, which has MAXPHYS / PAGE_SIZE entries, and our locally + * array, which has maxphys / PAGE_SIZE entries, and our locally * defined MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are * constrained by the swap device interleave stripe size. * @@ -601,7 +601,7 @@ * have one NFS swap device due to the command/ack latency over NFS. * So it all works out pretty well. */ - nsw_cluster_max = min(MAXPHYS / PAGE_SIZE, MAX_PAGEOUT_CLUSTER); + nsw_cluster_max = min(maxphys / PAGE_SIZE, MAX_PAGEOUT_CLUSTER); nsw_wcount_async = 4; nsw_wcount_async_max = nsw_wcount_async; @@ -1314,6 +1314,7 @@ VM_OBJECT_WUNLOCK(object); bp = uma_zalloc(swrbuf_zone, M_WAITOK); + MPASS((bp->b_flags & B_MAXPHYS) != 0); /* Pages cannot leave the object while busy. 
*/ for (i = 0, p = bm; i < count; i++, p = TAILQ_NEXT(p, listq)) { MPASS(p->pindex == bm->pindex + i); @@ -1522,8 +1523,9 @@ VM_OBJECT_WUNLOCK(object); bp = uma_zalloc(swwbuf_zone, M_WAITOK); + MPASS((bp->b_flags & B_MAXPHYS) != 0); if (async) - bp->b_flags = B_ASYNC; + bp->b_flags |= B_ASYNC; bp->b_flags |= B_PAGING; bp->b_iocmd = BIO_WRITE; Index: sys/vm/vm_fault.c =================================================================== --- sys/vm/vm_fault.c +++ sys/vm/vm_fault.c @@ -115,7 +115,6 @@ #define PFFOR 4 #define VM_FAULT_READ_DEFAULT (1 + VM_FAULT_READ_AHEAD_INIT) -#define VM_FAULT_READ_MAX (1 + VM_FAULT_READ_AHEAD_MAX) #define VM_FAULT_DONTNEED_MIN 1048576 Index: sys/vm/vm_init.c =================================================================== --- sys/vm/vm_init.c +++ sys/vm/vm_init.c @@ -212,7 +212,7 @@ /* * Allocate the clean map to hold all of I/O virtual memory. */ - size = (long)nbuf * BKVASIZE + (long)bio_transient_maxcnt * MAXPHYS; + size = (long)nbuf * BKVASIZE + (long)bio_transient_maxcnt * maxphys; kmi->clean_sva = firstaddr = kva_alloc(size); kmi->clean_eva = firstaddr + size; @@ -233,7 +233,7 @@ * And optionally transient bio space. */ if (bio_transient_maxcnt != 0) { - size = (long)bio_transient_maxcnt * MAXPHYS; + size = (long)bio_transient_maxcnt * maxphys; vmem_init(transient_arena, "transient arena", firstaddr, size, PAGE_SIZE, 0, 0); firstaddr += size; Index: sys/vm/vm_map.h =================================================================== --- sys/vm/vm_map.h +++ sys/vm/vm_map.h @@ -396,7 +396,7 @@ */ #define VM_FAULT_READ_AHEAD_MIN 7 #define VM_FAULT_READ_AHEAD_INIT 15 -#define VM_FAULT_READ_AHEAD_MAX min(atop(MAXPHYS) - 1, UINT8_MAX) +#define VM_FAULT_READ_AHEAD_MAX min(atop(maxphys) - 1, UINT8_MAX) /* * The following "find_space" options are supported by vm_map_find(). 
Index: sys/vm/vm_pager.c =================================================================== --- sys/vm/vm_pager.c +++ sys/vm/vm_pager.c @@ -94,6 +94,12 @@ static int pbuf_ctor(void *, int, void *, int); static void pbuf_dtor(void *, int, void *); +/* + * Number of pages that pbuf buffer can store in b_pages. + * It is +1 to allow for unaligned data buffer of maxphys size. + */ +#define PBUF_PAGES (atop(maxphys) + 1) + static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *); static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t, struct ucred *); @@ -183,7 +189,8 @@ { /* Main zone for paging bufs. */ - pbuf_zone = uma_zcreate("pbuf", sizeof(struct buf), + pbuf_zone = uma_zcreate("pbuf", + sizeof(struct buf) + PBUF_PAGES * sizeof(vm_page_t), pbuf_ctor, pbuf_dtor, pbuf_init, NULL, UMA_ALIGN_CACHE, UMA_ZONE_NOFREE); /* Few systems may still use this zone directly, so it needs a limit. */ @@ -384,7 +391,7 @@ bp->b_qindex = 0; /* On no queue (QUEUE_NONE) */ bp->b_data = bp->b_kvabase; bp->b_xflags = 0; - bp->b_flags = 0; + bp->b_flags = B_MAXPHYS; bp->b_ioflags = 0; bp->b_iodone = NULL; bp->b_error = 0; @@ -415,10 +422,10 @@ { struct buf *bp = mem; - bp->b_kvabase = (void *)kva_alloc(MAXPHYS); + bp->b_kvabase = (void *)kva_alloc(ptoa(PBUF_PAGES)); if (bp->b_kvabase == NULL) return (ENOMEM); - bp->b_kvasize = MAXPHYS; + bp->b_kvasize = ptoa(PBUF_PAGES); BUF_LOCKINIT(bp); LIST_INIT(&bp->b_dep); bp->b_rcred = bp->b_wcred = NOCRED; Index: sys/vm/vnode_pager.c =================================================================== --- sys/vm/vnode_pager.c +++ sys/vm/vnode_pager.c @@ -817,7 +817,7 @@ KASSERT(foff < object->un_pager.vnp.vnp_size, ("%s: page %p offset beyond vp %p size", __func__, m[0], vp)); - KASSERT(count <= nitems(bp->b_pages), + KASSERT(count <= atop(maxphys), ("%s: requested %d pages", __func__, count)); /* @@ -832,6 +832,7 @@ } bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK); + MPASS((bp->b_flags & B_MAXPHYS) != 0); 
/* * Get the underlying device blocks for the file with VOP_BMAP(). @@ -916,10 +917,10 @@ * Check that total amount of pages fit into buf. Trim rbehind and * rahead evenly if not. */ - if (rbehind + rahead + count > nitems(bp->b_pages)) { + if (rbehind + rahead + count > atop(maxphys)) { int trim, sum; - trim = rbehind + rahead + count - nitems(bp->b_pages) + 1; + trim = rbehind + rahead + count - atop(maxphys) + 1; sum = rbehind + rahead; if (rbehind == before) { /* Roundup rbehind trim to block size. */ @@ -930,9 +931,9 @@ rbehind -= trim * rbehind / sum; rahead -= trim * rahead / sum; } - KASSERT(rbehind + rahead + count <= nitems(bp->b_pages), - ("%s: behind %d ahead %d count %d", __func__, - rbehind, rahead, count)); + KASSERT(rbehind + rahead + count <= atop(maxphys), + ("%s: behind %d ahead %d count %d maxphys %lu", __func__, + rbehind, rahead, count, maxphys)); /* * Fill in the bp->b_pages[] array with requested and optional @@ -1014,7 +1015,7 @@ *a_rahead = bp->b_pgafter; #ifdef INVARIANTS - KASSERT(bp->b_npages <= nitems(bp->b_pages), + KASSERT(bp->b_npages <= atop(maxphys), ("%s: buf %p overflowed", __func__, bp)); for (int j = 1, prev = 0; j < bp->b_npages; j++) { if (bp->b_pages[j] == bogus_page)