Index: lib/libthr/thread/thr_pshared.c
===================================================================
--- lib/libthr/thread/thr_pshared.c
+++ lib/libthr/thread/thr_pshared.c
@@ -86,6 +86,16 @@
 	_thr_ast(curthread);
 }

+/*
+ * Only one of the processes sharing a lock executes
+ * pthread_lock_destroy() for the lock.  Processes that did not
+ * destroy the lock keep the hash entry and the mapped off-page.
+ *
+ * Mitigate the problem by periodically checking the liveness of
+ * all hashed keys.  Right now this is done on each
+ * pthread_lock_destroy(), but it may be done less often if it
+ * proves too time-consuming.
+ */
 static void
 pshared_gc(struct pthread *curthread)
 {
@@ -131,6 +141,25 @@

 	hd = &pshared_hash[PSHARED_KEY_HASH(key)];
 	LIST_FOREACH(h, hd, link) {
+		/*
+		 * If the key already exists in the hash, we must return
+		 * either the new (just mapped) or the old (hashed) val,
+		 * and the other val must be unmapped to avoid an address
+		 * space leak.
+		 *
+		 * If two threads lock the same object that is not yet
+		 * recorded in pshared_hash, the val already inserted by
+		 * the first thread must be returned and the second val
+		 * freed (the order is ensured by pshared_lock()).
+		 * Otherwise, if we unmapped the value kept in the hash,
+		 * the first thread might operate on an unmapped off-page
+		 * object.
+		 *
+		 * There is still an issue: if the hashed key is unmapped
+		 * and another page is later mapped at the key address,
+		 * the hash returns the old val.  I decided to handle the
+		 * race over this unlikely remap problem.
+		 */
 		if (h->key == key) {
			if (h->val != *val) {
				munmap(*val, PAGE_SIZE);
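
For reference, below is a minimal standalone sketch of the insert-or-reuse rule described in the second comment: on a hash hit, the caller's freshly mapped page is unmapped and the already hashed val is handed back, so the first locker's pointer stays valid and no address space is leaked.  This is not the libthr implementation; the names (pshared_entry, pshared_put, hash_slot, hash_heads) and the simplified single-process hash are assumptions made for illustration only.

/* Sketch only: a simplified stand-in for libthr's pshared hash. */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct pshared_entry {			/* hypothetical entry type */
	LIST_ENTRY(pshared_entry) link;
	void	*key;			/* address of the user-visible lock */
	void	*val;			/* mapped off-page holding the real lock */
};
LIST_HEAD(pshared_head, pshared_entry);

#define	HASH_SIZE	128
static struct pshared_head hash_heads[HASH_SIZE];

static size_t
hash_slot(void *key)
{

	return (((uintptr_t)key >> 4) % HASH_SIZE);
}

/*
 * Insert (key, *val) unless key is already hashed.  On a hit, unmap
 * the caller's freshly mapped page and return the stored val, which
 * is the resolution the comment above argues for.
 */
static int
pshared_put(void *key, void **val)
{
	struct pshared_head *hd;
	struct pshared_entry *h;

	hd = &hash_heads[hash_slot(key)];
	LIST_FOREACH(h, hd, link) {
		if (h->key == key) {
			if (h->val != *val) {
				munmap(*val, getpagesize());
				*val = h->val;
			}
			return (0);	/* existing entry reused */
		}
	}
	h = malloc(sizeof(*h));
	if (h == NULL)
		return (-1);
	h->key = key;
	h->val = *val;
	LIST_INSERT_HEAD(hd, h, link);
	return (1);			/* new entry inserted */
}

int
main(void)
{
	int lock_word;			/* stands in for a process-shared lock */
	void *p1, *p2, *v1, *v2;

	p1 = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	p2 = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p1 == MAP_FAILED || p2 == MAP_FAILED)
		return (1);

	v1 = p1;
	v2 = p2;
	printf("first insert:  %d\n", pshared_put(&lock_word, &v1));
	printf("second insert: %d, old val reused: %s\n",
	    pshared_put(&lock_word, &v2), v2 == p1 ? "yes" : "no");
	return (0);
}

Keeping the hashed val on a collision, rather than the new mapping, follows the comment's reasoning: the first thread may already be operating on the stored off-page, so only the loser's duplicate page is safe to unmap.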