diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -766,18 +766,24 @@
 	 * Not counting the 'maxproc' contribution, the (marginal) growth factor
 	 * of 'physvnodes' with respect to the physical memory size in MB is 64
 	 * until 'physvnodes' exceeds 98,304 (still not counting the 'maxproc'
-	 * contribution).  Thereafter, the marginal factor goes down to 16.
+	 * contribution).  Thereafter, the marginal factor goes down to 32.  32
+	 * has been chosen as it ensures that there are slightly more vnodes
+	 * than the default 'maxfiles' (whose marginal growth is 32, while here
+	 * it is slightly higher than 32 because of the 'maxproc' contribution,
+	 * but less than 33 because 'vm_cnt.v_page_count' is smaller than
+	 * 'physpages' used to compute 'maxfiles'), which avoids a deadlock
+	 * situation we do not currently handle.
 	 *
 	 * Separately, the memory required by vnodes and VM objects must not
 	 * exceed 1/10th of the kernel's heap size.  This last limit is computed
 	 * in 'virtvnodes'.
 	 *
 	 * With the current formulae, on 64-bit platforms, 'desiredvnodes' is
-	 * 'virtvnodes' up to a physical memory cutoff of ~1674MB, and then
+	 * 'virtvnodes' up to a physical memory cutoff of ~1722MB, and then
 	 * 'physvnodes' applies instead.
 	 */
-	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
-	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
+	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 32 +
+	    min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 32;
 	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
 	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
 	desiredvnodes = min(physvnodes, virtvnodes);
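
For readers who want to check the comment's arithmetic, the userland sketch below (not part of the change) mirrors the new 'physvnodes' formula around the point where the min() saturates. It assumes 4 KB pages and sets 'maxproc' to 0, since only the marginal growth factor is of interest here; pgtok() and min() are re-modeled locally rather than taken from the kernel headers.

/*
 * Standalone sketch (not kernel code) of the new 'physvnodes' formula.
 * Assumptions of this sketch: 4 KB pages, and maxproc == 0 so that only
 * the memory-dependent terms contribute.
 */
#include <stdio.h>

#define	PAGE_KB		4			/* assumed page size in KB */
#define	pgtok(p)	((p) * PAGE_KB)		/* pages -> kilobytes */

static long
lmin(long a, long b)
{
	return (a < b ? a : b);
}

/* New formula from the diff, with the 'maxproc' term dropped. */
static long
physvnodes(long mem_mb)
{
	long pages = mem_mb * 1024 / PAGE_KB;

	return (pgtok(pages) / 32 +
	    lmin(98304L * 16, pgtok(pages)) / 32);
}

int
main(void)
{
	/*
	 * Below the knee both terms grow, giving 2 * 1024/32 = 64
	 * vnodes/MB.  lmin() saturates at 98304*16 KB = 1536 MB, where
	 * the sum is exactly 98,304 vnodes; beyond that only the first
	 * term grows, at 1024/32 = 32 vnodes/MB.
	 */
	printf("1535 MB -> %ld\n", physvnodes(1535));	/* 98240 */
	printf("1536 MB -> %ld\n", physvnodes(1536));	/* 98304 */
	printf("1537 MB -> %ld\n", physvnodes(1537));	/* 98336 */
	return (0);
}

Compiled and run, this prints 98240, 98304 and 98336, confirming the 64 vnodes/MB slope below the 1536 MB knee, the 98,304-vnode value at the knee, and the 32 vnodes/MB marginal factor above it that the updated comment describes. The ~1722MB cutoff quoted in the comment additionally depends on 'virtvnodes' (kernel heap size and per-vnode structure sizes), so it is not reproduced here.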