Index: head/lib/libthr/thread/thr_pshared.c =================================================================== --- head/lib/libthr/thread/thr_pshared.c +++ head/lib/libthr/thread/thr_pshared.c @@ -86,6 +86,16 @@ _thr_ast(curthread); } +/* + * Among all processes sharing a lock only one executes + * pthread_lock_destroy(). Other processes still have the hash and + * mapped off-page. + * + * Mitigate the problem by checking the liveness of all hashed keys + * periodically. Right now this is executed on each + * pthread_lock_destroy(), but may be done less often if found to be + * too time-consuming. + */ static void pshared_gc(struct pthread *curthread) { @@ -131,6 +141,27 @@ hd = &pshared_hash[PSHARED_KEY_HASH(key)]; LIST_FOREACH(h, hd, link) { + /* + * When the key already exists in the hash, we should + * return either the new (just mapped) or old (hashed) + * val, and the other val should be unmapped to avoid + * address space leak. + * + * If two threads perform lock of the same object + * which is not yet stored in the pshared_hash, then + * the val already inserted by the first thread should + * be returned, and the second val freed (the order is + * determined by the pshared_lock()). Otherwise, if we + * unmap the value obtained from the hash, the first + * thread might operate on an unmapped off-page object. + * + * There is still an issue: if the hashed key was + * unmapped and then another page is mapped at the same + * key address, the hash would return the old val. I + * decided to handle the race of simultaneous hash + * insertion, leaving the unlikely remap problem + * unaddressed. + */ if (h->key == key) { if (h->val != *val) { munmap(*val, PAGE_SIZE);