diff --git a/sys/kern/kern_umtx.c b/sys/kern/kern_umtx.c
--- a/sys/kern/kern_umtx.c
+++ b/sys/kern/kern_umtx.c
@@ -4485,6 +4485,7 @@
 umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
     struct umtx_shm_reg **res)
 {
+	struct shmfd *shm;
 	struct umtx_shm_reg *reg, *reg1;
 	struct ucred *cred;
 	int error;
@@ -4504,9 +4505,14 @@
 	cred = td->td_ucred;
 	if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
 		return (ENOMEM);
+	shm = shm_alloc(td->td_ucred, O_RDWR, false);
+	if (shm == NULL) {
+		chgumtxcnt(cred->cr_ruidinfo, -1, 0);
+		return (ENOMEM);
+	}
 	reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
 	bcopy(key, &reg->ushm_key, sizeof(*key));
-	reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR, false);
+	reg->ushm_obj = shm;
 	reg->ushm_cred = crhold(cred);
 	error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
 	if (error != 0) {
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -947,14 +947,23 @@
 	if (largepage) {
 		obj = phys_pager_allocate(NULL, &shm_largepage_phys_ops,
 		    NULL, shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
-		obj->un_pager.phys.phys_priv = shmfd;
+		if (obj != NULL)
+			obj->un_pager.phys.phys_priv = shmfd;
 		shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
 	} else {
 		obj = vm_pager_allocate(shmfd_pager_type, NULL,
 		    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
-		obj->un_pager.swp.swp_priv = shmfd;
+		if (obj != NULL)
+			obj->un_pager.swp.swp_priv = shmfd;
+	}
+	if (obj == NULL) {
+		/*
+		 * swap reservation limits can cause object allocation
+		 * to fail.
+		 */
+		free(shmfd, M_SHMFD);
+		return (NULL);
 	}
-	KASSERT(obj != NULL, ("shm_create: vm_pager_allocate"));
 	VM_OBJECT_WLOCK(obj);
 	vm_object_set_flag(obj, OBJ_POSIXSHM);
 	VM_OBJECT_WUNLOCK(obj);
@@ -1241,11 +1250,14 @@
 	if (userpath == SHM_ANON) {
 		/* A read-only anonymous object is pointless. */
 		if ((flags & O_ACCMODE) == O_RDONLY) {
-			fdclose(td, fp, fd);
-			fdrop(fp, td);
-			return (EINVAL);
+			error = EINVAL;
+			goto out;
 		}
 		shmfd = shm_alloc(td->td_ucred, cmode, largepage);
+		if (shmfd == NULL) {
+			error = ENOMEM;
+			goto out;
+		}
 		shmfd->shm_seals = initial_seals;
 		shmfd->shm_flags = shmflags;
 	} else {
@@ -1262,17 +1274,28 @@
 #endif
 					shmfd = shm_alloc(td->td_ucred, cmode,
 					    largepage);
-					shmfd->shm_seals = initial_seals;
-					shmfd->shm_flags = shmflags;
-					shm_insert(path, fnv, shmfd);
+					if (shmfd == NULL) {
+						error = ENOMEM;
+					} else {
+						shmfd->shm_seals =
+						    initial_seals;
+						shmfd->shm_flags = shmflags;
+						shm_insert(path, fnv, shmfd);
+					}
 #ifdef MAC
 				}
 #endif
 			} else {
-				free(path, M_SHMFD);
 				error = ENOENT;
 			}
+			if (error != 0)
+				free(path, M_SHMFD);
 		} else {
+			/*
+			 * Object already exists, obtain a new reference if
+			 * requested and permitted.
+			 */
+			free(path, M_SHMFD);
 			rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
 
 			/*
@@ -1285,12 +1308,6 @@
 			 */
 			initial_seals &= ~shmfd->shm_seals;
 
-			/*
-			 * Object already exists, obtain a new
-			 * reference if requested and permitted.
-			 */
-			free(path, M_SHMFD);
-
 			/*
 			 * initial_seals can't set additional seals if we've
 			 * already been set F_SEAL_SEAL.  If F_SEAL_SEAL is set,
@@ -1349,11 +1366,8 @@
 		}
 		sx_xunlock(&shm_dict_lock);
 
-		if (error) {
-			fdclose(td, fp, fd);
-			fdrop(fp, td);
-			return (error);
-		}
+		if (error != 0)
+			goto out;
 	}
 
 	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
@@ -1362,6 +1376,11 @@
 	fdrop(fp, td);
 
 	return (0);
+
+out:
+	fdclose(td, fp, fd);
+	fdrop(fp, td);
+	return (error);
 }
 
 /* System calls. */
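
Illustration only, not part of the patch: a minimal FreeBSD userspace sketch of the behavior the change introduces. With shm_alloc() now allowed to fail, kern_shm_open2() returns ENOMEM to the caller (for example when swap reservation is exhausted) instead of hitting the removed KASSERT, so shm_open() can report ENOMEM at object-creation time. The program below only shows where that errno would surface; the allocation-failure condition itself is assumed, not demonstrated.

#include <sys/mman.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd;

	/* Anonymous POSIX shm object; O_RDWR is required with SHM_ANON. */
	fd = shm_open(SHM_ANON, O_RDWR, 0600);
	if (fd == -1)
		err(1, "shm_open%s", errno == ENOMEM ?
		    " (backing VM object allocation failed)" : "");

	/* Give the object a size of one page. */
	if (ftruncate(fd, getpagesize()) == -1)
		err(1, "ftruncate");

	printf("anonymous shm object created: fd %d\n", fd);
	close(fd);
	return (0);
}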