D2665.diff

Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -3935,7 +3935,6 @@
pd_entry_t newpde;
pt_entry_t *firstpte, oldpte, pa, *pte;
pt_entry_t PG_G, PG_A, PG_M, PG_RW, PG_V;
- vm_offset_t oldpteva;
vm_page_t mpte;
int PG_PTE_CACHE;
@@ -3995,10 +3994,9 @@
if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW))
goto setpte;
oldpte &= ~PG_RW;
- oldpteva = (oldpte & PG_FRAME & PDRMASK) |
- (va & ~PDRMASK);
CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
- " in pmap %p", oldpteva, pmap);
+ " in pmap %p", (oldpte & PG_FRAME & PDRMASK) |
+ (va & ~PDRMASK), pmap);
}
if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
atomic_add_long(&pmap_pde_p_failures, 1);
Index: sys/amd64/amd64/vm_machdep.c
===================================================================
--- sys/amd64/amd64/vm_machdep.c
+++ sys/amd64/amd64/vm_machdep.c
@@ -155,7 +155,6 @@
struct pcb *pcb2;
struct mdproc *mdp1, *mdp2;
struct proc_ldt *pldt;
- pmap_t pmap2;
p1 = td1->td_proc;
if ((flags & RFPROC) == 0) {
@@ -218,7 +217,6 @@
* Set registers for trampoline to user mode. Leave space for the
* return address on stack. These are the kernel mode register values.
*/
- pmap2 = vmspace_pmap(p2->p_vmspace);
pcb2->pcb_r12 = (register_t)fork_return; /* fork_trampoline argument */
pcb2->pcb_rbp = 0;
pcb2->pcb_rsp = (register_t)td2->td_frame - sizeof(void *);
Index: sys/amd64/ia32/ia32_reg.c
===================================================================
--- sys/amd64/ia32/ia32_reg.c
+++ sys/amd64/ia32/ia32_reg.c
@@ -79,11 +79,9 @@
int
fill_regs32(struct thread *td, struct reg32 *regs)
{
- struct pcb *pcb;
struct trapframe *tp;
tp = td->td_frame;
- pcb = td->td_pcb;
if (tp->tf_flags & TF_HASSEGS) {
regs->r_gs = tp->tf_gs;
regs->r_fs = tp->tf_fs;
@@ -113,18 +111,16 @@
int
set_regs32(struct thread *td, struct reg32 *regs)
{
- struct pcb *pcb;
struct trapframe *tp;
tp = td->td_frame;
if (!EFL_SECURE(regs->r_eflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
return (EINVAL);
- pcb = td->td_pcb;
tp->tf_gs = regs->r_gs;
tp->tf_fs = regs->r_fs;
tp->tf_es = regs->r_es;
tp->tf_ds = regs->r_ds;
- set_pcb_flags(pcb, PCB_FULL_IRET);
+ set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
tp->tf_flags = TF_HASSEGS;
tp->tf_rdi = regs->r_edi;
tp->tf_rsi = regs->r_esi;
Index: sys/dev/pci/pci.c
===================================================================
--- sys/dev/pci/pci.c
+++ sys/dev/pci/pci.c
@@ -2389,7 +2389,7 @@
struct pci_devinfo *dinfo = device_get_ivars(child);
pcicfgregs *cfg = &dinfo->cfg;
uint16_t status;
- int result, oldstate, highest, delay;
+ int oldstate, highest, delay;
if (cfg->pp.pp_cap == 0)
return (EOPNOTSUPP);
@@ -2424,7 +2424,6 @@
delay = 0;
status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
& ~PCIM_PSTAT_DMASK;
- result = 0;
switch (state) {
case PCI_POWERSTATE_D0:
status |= PCIM_PSTAT_D0;
@@ -2989,7 +2988,6 @@
pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
uint32_t prefetchmask)
{
- struct resource *r;
int rid, type, progif;
#if 0
/* if this device supports PCI native addressing use it */
@@ -3012,11 +3010,11 @@
} else {
rid = PCIR_BAR(0);
resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
- r = resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
+ (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
0x1f7, 8, 0);
rid = PCIR_BAR(1);
resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
- r = resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
+ (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
0x3f6, 1, 0);
}
if (progif & PCIP_STORAGE_IDE_MODESEC) {
@@ -3027,11 +3025,11 @@
} else {
rid = PCIR_BAR(2);
resource_list_add(rl, type, rid, 0x170, 0x177, 8);
- r = resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
+ (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
0x177, 8, 0);
rid = PCIR_BAR(3);
resource_list_add(rl, type, rid, 0x376, 0x376, 1);
- r = resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
+ (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
0x376, 1, 0);
}
pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
@@ -3727,7 +3725,6 @@
static void
pci_set_power_child(device_t dev, device_t child, int state)
{
- struct pci_devinfo *dinfo;
device_t pcib;
int dstate;
@@ -3739,7 +3736,6 @@
* are handled separately.
*/
pcib = device_get_parent(dev);
- dinfo = device_get_ivars(child);
dstate = state;
if (device_is_attached(child) &&
PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0)
Index: sys/kern/kern_exit.c
===================================================================
--- sys/kern/kern_exit.c
+++ sys/kern/kern_exit.c
@@ -964,12 +964,10 @@
int *status, int options, struct __wrusage *wrusage, siginfo_t *siginfo,
int check_only)
{
- struct proc *q;
struct rusage *rup;
sx_assert(&proctree_lock, SA_XLOCKED);
- q = td->td_proc;
PROC_LOCK(p);
switch (idtype) {
Index: sys/kern/kern_synch.c
===================================================================
--- sys/kern/kern_synch.c
+++ sys/kern/kern_synch.c
@@ -414,11 +414,9 @@
{
uint64_t runtime, new_switchtime;
struct thread *td;
- struct proc *p;
td = curthread; /* XXX */
THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
- p = td->td_proc; /* XXX */
KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
@@ -458,7 +456,7 @@
PCPU_INC(cnt.v_swtch);
PCPU_SET(switchticks, ticks);
CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
- td->td_tid, td->td_sched, p->p_pid, td->td_name);
+ td->td_tid, td->td_sched, td->td_proc->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
if (TD_IS_IDLETHREAD(td))
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
@@ -474,7 +472,7 @@
"prio:%d", td->td_priority);
CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
- td->td_tid, td->td_sched, p->p_pid, td->td_name);
+ td->td_tid, td->td_sched, td->td_proc->p_pid, td->td_name);
/*
* If the last thread was exiting, finish cleaning it up.
Index: sys/kern/vfs_cluster.c
===================================================================
--- sys/kern/vfs_cluster.c
+++ sys/kern/vfs_cluster.c
@@ -310,7 +310,6 @@
cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
{
- struct bufobj *bo;
struct buf *bp, *tbp;
daddr_t bn;
off_t off;
@@ -376,7 +375,6 @@
bp->b_npages = 0;
inc = btodb(size);
- bo = &vp->v_bufobj;
for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
if (i == 0) {
VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
Index: sys/kern/vfs_init.c
===================================================================
--- sys/kern/vfs_init.c
+++ sys/kern/vfs_init.c
@@ -311,9 +311,7 @@
vfs_unregister(struct vfsconf *vfc)
{
struct vfsconf *vfsp;
- int error, i, maxtypenum;
-
- i = vfc->vfc_typenum;
+ int error, maxtypenum;
vfsconf_lock();
vfsp = vfs_byname_locked(vfc->vfc_name);
Index: sys/ufs/ffs/ffs_softdep.c
===================================================================
--- sys/ufs/ffs/ffs_softdep.c
+++ sys/ufs/ffs/ffs_softdep.c
@@ -4691,12 +4691,10 @@
struct inodedep *inodedep;
struct jaddref *jaddref;
struct vnode *dvp;
- struct vnode *vp;
KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
("softdep_setup_dotdot_link called on non-softdep filesystem"));
dvp = ITOV(dp);
- vp = ITOV(ip);
jaddref = NULL;
/*
* We don't set MKDIR_PARENT as this is not tied to a mkdir and
@@ -7052,7 +7050,6 @@
struct bufobj *bo;
struct vnode *vp;
struct buf *bp;
- struct fs *fs;
int blkoff;
/*
@@ -7061,7 +7058,6 @@
* Once they are all there, walk the list and get rid of
* any dependencies.
*/
- fs = ip->i_fs;
vp = ITOV(ip);
bo = &vp->v_bufobj;
BO_LOCK(bo);
@@ -9493,12 +9489,10 @@
struct buf *bp;
{
struct inodedep *inodedep;
- struct mount *mp;
struct fs *fs;
LOCK_OWNED(sbdep->sb_ump);
fs = sbdep->sb_fs;
- mp = UFSTOVFS(sbdep->sb_ump);
/*
* If the superblock doesn't match the in-memory list start over.
*/
Index: sys/ufs/ffs/ffs_suspend.c
===================================================================
--- sys/ufs/ffs/ffs_suspend.c
+++ sys/ufs/ffs/ffs_suspend.c
@@ -177,7 +177,6 @@
static int
ffs_susp_suspend(struct mount *mp)
{
- struct fs *fs;
struct ufsmount *ump;
int error;
@@ -189,7 +188,6 @@
return (EBUSY);
ump = VFSTOUFS(mp);
- fs = ump->um_fs;
/*
* Make sure the calling thread is permitted to access the mounted
Index: sys/ufs/ffs/ffs_vfsops.c
===================================================================
--- sys/ufs/ffs/ffs_vfsops.c
+++ sys/ufs/ffs/ffs_vfsops.c
@@ -1486,7 +1486,7 @@
struct inode *ip;
struct ufsmount *ump = VFSTOUFS(mp);
struct fs *fs;
- int error, count, wait, lockreq, allerror = 0;
+ int error, count, lockreq, allerror = 0;
int suspend;
int suspended;
int secondary_writes;
@@ -1495,7 +1495,6 @@
int softdep_accdeps;
struct bufobj *bo;
- wait = 0;
suspend = 0;
suspended = 0;
td = curthread;
@@ -1517,10 +1516,8 @@
suspend = 1;
waitfor = MNT_WAIT;
}
- if (waitfor == MNT_WAIT) {
- wait = 1;
+ if (waitfor == MNT_WAIT)
lockreq = LK_EXCLUSIVE;
- }
lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
loop:
/* Grab snapshot of secondary write counts */
@@ -2024,7 +2021,6 @@
ffs_bufwrite(struct buf *bp)
{
struct buf *newbp;
- int oldflags;
CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
if (bp->b_flags & B_INVAL) {
@@ -2032,8 +2028,6 @@
return (0);
}
- oldflags = bp->b_flags;
-
if (!BUF_ISLOCKED(bp))
panic("bufwrite: buffer is not busy???");
/*
Index: sys/ufs/ffs/ffs_vnops.c
===================================================================
--- sys/ufs/ffs/ffs_vnops.c
+++ sys/ufs/ffs/ffs_vnops.c
@@ -1366,11 +1366,6 @@
};
*/
{
- struct inode *ip;
- struct fs *fs;
-
- ip = VTOI(ap->a_vp);
- fs = ip->i_fs;
if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
return (EOPNOTSUPP);
@@ -1394,11 +1389,6 @@
};
*/
{
- struct inode *ip;
- struct fs *fs;
-
- ip = VTOI(ap->a_vp);
- fs = ip->i_fs;
if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
return (EOPNOTSUPP);
@@ -1512,13 +1502,11 @@
*/
{
struct inode *ip;
- struct fs *fs;
u_char *eae, *p;
unsigned easize;
int error, ealen;
ip = VTOI(ap->a_vp);
- fs = ip->i_fs;
if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
return (EOPNOTSUPP);
@@ -1567,14 +1555,12 @@
*/
{
struct inode *ip;
- struct fs *fs;
u_char *eae, *p, *pe, *pn;
unsigned easize;
uint32_t ul;
int error, ealen;
ip = VTOI(ap->a_vp);
- fs = ip->i_fs;
if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
return (EOPNOTSUPP);
Index: sys/ufs/ufs/ufs_bmap.c
===================================================================
--- sys/ufs/ufs/ufs_bmap.c
+++ sys/ufs/ufs/ufs_bmap.c
@@ -114,7 +114,6 @@
struct buf *bp;
struct ufsmount *ump;
struct mount *mp;
- struct vnode *devvp;
struct indir a[NIADDR+1], *ap;
ufs2_daddr_t daddr;
ufs_lbn_t metalbn;
@@ -125,7 +124,6 @@
ip = VTOI(vp);
mp = vp->v_mount;
ump = VFSTOUFS(mp);
- devvp = ump->um_devvp;
if (runp) {
maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;
Index: sys/ufs/ufs/ufs_dirhash.c
===================================================================
--- sys/ufs/ufs/ufs_dirhash.c
+++ sys/ufs/ufs/ufs_dirhash.c
@@ -190,9 +190,7 @@
struct dirhash *ndh;
struct dirhash *dh;
struct vnode *vp;
- int error;
- error = 0;
ndh = dh = NULL;
vp = ip->i_vnode;
for (;;) {
@@ -274,11 +272,9 @@
ufsdirhash_acquire(struct inode *ip)
{
struct dirhash *dh;
- struct vnode *vp;
ASSERT_VOP_ELOCKED(ip->i_vnode, __FUNCTION__);
- vp = ip->i_vnode;
dh = ip->i_dirhash;
if (dh == NULL)
return (NULL);
Index: sys/x86/iommu/busdma_dmar.c
===================================================================
--- sys/x86/iommu/busdma_dmar.c
+++ sys/x86/iommu/busdma_dmar.c
@@ -818,7 +818,6 @@
struct bus_dma_tag_dmar *tag;
struct bus_dmamap_dmar *map;
struct dmar_unit *unit;
- struct dmar_ctx *ctx;
unit = arg;
DMAR_LOCK(unit);
@@ -826,7 +825,6 @@
TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
DMAR_UNLOCK(unit);
tag = map->tag;
- ctx = map->tag->ctx;
map->cansleep = true;
map->locked = false;
bus_dmamap_load_mem((bus_dma_tag_t)tag, (bus_dmamap_t)map,
@@ -847,9 +845,7 @@
static void
dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map)
{
- struct dmar_ctx *ctx;
- ctx = map->tag->ctx;
map->locked = false;
DMAR_LOCK(unit);
TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
Index: sys/x86/iommu/intel_idpgtbl.c
===================================================================
--- sys/x86/iommu/intel_idpgtbl.c
+++ sys/x86/iommu/intel_idpgtbl.c
@@ -108,7 +108,7 @@
ctx_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
dmar_gaddr_t addr)
{
- vm_page_t m, m1;
+ vm_page_t m1;
dmar_pte_t *pte;
struct sf_buf *sf;
dmar_gaddr_t f, pg_sz;
@@ -118,7 +118,7 @@
VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
if (addr >= tbl->maxaddr)
return;
- m = dmar_pgalloc(tbl->pgtbl_obj, idx, DMAR_PGF_OBJL | DMAR_PGF_WAITOK |
+ (void)dmar_pgalloc(tbl->pgtbl_obj, idx, DMAR_PGF_OBJL | DMAR_PGF_WAITOK |
DMAR_PGF_ZERO);
base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */
pg_sz = pglvl_page_size(tbl->pglvl, lvl);
@@ -598,7 +598,7 @@
dmar_pte_t *pte;
struct sf_buf *sf;
vm_pindex_t idx;
- dmar_gaddr_t pg_sz, base1, size1;
+ dmar_gaddr_t pg_sz;
int lvl;
DMAR_CTX_ASSERT_PGLOCKED(ctx);
@@ -625,8 +625,6 @@
KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));
pg_sz = 0; /* silence gcc */
- base1 = base;
- size1 = size;
flags |= DMAR_PGF_OBJL;
TD_PREP_PINNED_ASSERT;
