Index: head/lib/libkse/arch/arm/arm/context.S
===================================================================
--- head/lib/libkse/arch/arm/arm/context.S	(revision 137282)
+++ head/lib/libkse/arch/arm/arm/context.S	(revision 137283)
@@ -1,81 +1,76 @@
 /*
  * Copyright (c) Olivier Houchard
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Neither the name of the author nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
 /*
  * int thr_setcontext(mcontext_t *mcp, intptr_t val, intptr_t *loc)
  *
  * Restores the context in mcp.
  *
  * Returns 0 if there are no errors; -1 otherwise
  */
 .weak	_C_LABEL(_thr_setcontext)
 .set	_C_LABEL(_thr_setcontext), _C_LABEL(__thr_setcontext)
 ENTRY(__thr_setcontext)
 	/* Check for NULL pointer. */
 	cmp	r0, #0
 	moveq	r0, #-1
 	moveq	pc, lr
-	add	ip, r0, #8
-	ldmia	ip, {r2-r12}
 	cmp	r2, #0
-	str	r1, [r2]
-	add	ip, r0, #4
-	str	ip, [r1]	/* Restore r1. */
-	add	ip, r0, #64
-	msr	cpsr, ip
-	add	ip, r0, #52
-	mov	r0, #0		/* Return 0. */
-	ldr	sp, [ip]	/* Restore stack pointer. */
-	mov	pc, lr		/* Return. */
+	strne	r1, [r2]
+	ldmia	r0, {r0-r15}	/* XXX: FP bits ? */
 
 /*
  * int thr_getcontext(mcontext_t *mcp);
  *
  * Returns -1 if there is an error, 0 no errors; 1 upon return
  * from a setcontext().
  */
 .weak	_C_LABEL(_thr_getcontext)
 .set	_C_LABEL(_thr_getcontext), _C_LABEL(__thr_getcontext)
 ENTRY(__thr_getcontext)
 	/* Check for NULL pointer. */
 	cmp	r0, #0
 	moveq	r0, #-1
 	moveq	pc, lr
 	stmia	r0, {r0-r13}
+	mov	r1, #1
+	str	r1, [r0]		/* Return 1 from setcontext */
+	str	lr, [r0, #(15 * 4)]	/* PC */
+	mrs	r1, cpsr
+	str	r1, [r0, #(16 * 4)]	/* CPSR */
 	mov	r0, #0			/* Return 0. */
 	mov	pc, lr
 
 ENTRY(_arm_enter_uts)
-	add	r4, r2, r3	/* Stack addr + size. */
-	mov	lr, pc
+	add	sp, r2, r3	/* Stack addr + size. */
 	mov	pc, r1
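The rewritten primitives treat the mcontext as a flat array of ARM registers. __thr_getcontext stores r0-r13 at the start of the block, overwrites the saved r0 slot with 1, records lr in slot 15 (the resume PC) and CPSR in slot 16, then returns 0 to its direct caller. __thr_setcontext stores val to *loc when loc is non-NULL and reloads r0-r15 with one ldmia, so execution resumes at the saved PC and the original _thr_getcontext call appears to return the planted 1. The program below is a runnable analogy of that 0-then-1 convention built on POSIX getcontext()/setcontext(); it is illustrative only, since the POSIX calls return 0 both times, a volatile flag stands in for the 1 written into the saved r0 slot.

/*
 * Analogy only: models the "getcontext returns 0 first, 1 after a
 * setcontext" convention used by __thr_getcontext/__thr_setcontext.
 */
#include <stdio.h>
#include <ucontext.h>

int
main(void)
{
	ucontext_t ctx;
	volatile int resumed = 0;	/* plays the role of the saved r0 */

	getcontext(&ctx);		/* like _thr_getcontext(mcp) */
	if (!resumed) {
		resumed = 1;
		printf("first pass: behaves like a 0 return\n");
		setcontext(&ctx);	/* like _thr_setcontext(mcp, ...) */
	}
	printf("second pass: behaves like a 1 return\n");
	return (0);
}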
Index: head/lib/libkse/arch/arm/arm/pthread_md.c
===================================================================
--- head/lib/libkse/arch/arm/arm/pthread_md.c	(revision 137282)
+++ head/lib/libkse/arch/arm/arm/pthread_md.c	(revision 137283)
@@ -1,91 +1,84 @@
 /*-
  * Copyright (C) 2003 Jake Burkholder
  * Copyright (C) 2003 David Xu
  * Copyright (c) 2001,2003 Daniel Eischen
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Neither the name of the author nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include
 #include
 #include
 #include
 #include
 #include
 
 #include "pthread_md.h"
 
+struct arm_tp *_tp = NULL;
+
 struct tcb *
 _tcb_ctor(struct pthread *thread, int initial)
 {
 	struct tcb *tcb;
-	void *addr;
 
-	addr = malloc(sizeof(struct tcb) + 63);
-	if (addr == NULL)
-		tcb = NULL;
-	else {
-		tcb = (struct tcb *)(((uintptr_t)(addr) + 63) & ~63);
+	if ((tcb = malloc(sizeof(struct tcb)))) {
 		bzero(tcb, sizeof(struct tcb));
-		tcb->tcb_addr = addr;
 		tcb->tcb_thread = thread;
 		/* XXX - Allocate tdv/tls */
 	}
 	return (tcb);
 }
 
 void
 _tcb_dtor(struct tcb *tcb)
 {
-	void *addr;
 
-	addr = tcb->tcb_addr;
-	tcb->tcb_addr = NULL;
-	free(addr);
+	free(tcb);
 }
 
 struct kcb *
 _kcb_ctor(struct kse *kse)
 {
 	struct kcb *kcb;
 
 	kcb = malloc(sizeof(struct kcb));
 	if (kcb != NULL) {
 		bzero(kcb, sizeof(struct kcb));
 		kcb->kcb_faketcb.tcb_isfake = 1;
 		kcb->kcb_faketcb.tcb_tmbx.tm_flags = TMF_NOUPCALL;
 		kcb->kcb_curtcb = &kcb->kcb_faketcb;
 		kcb->kcb_kse = kse;
 	}
 	return (kcb);
 }
 
 void
 _kcb_dtor(struct kcb *kcb)
 {
 	free(kcb);
 }
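With the __aligned(32) attribute dropped from struct tcb (see the pthread_md.h hunk below), _tcb_ctor() no longer has to over-allocate and round the pointer up by hand, and the tcb_addr field that remembered the real malloc() result disappears with it; a plain malloc()/free() pair is enough. For contrast, the sketch below shows the generic over-allocate-and-align idiom the removed code used; ALIGN, alloc_aligned() and real_addr are invented names and nothing here is part of the new sources.

/*
 * Over-allocate by ALIGN-1 bytes, then round the pointer up to the next
 * ALIGN boundary; the original pointer must be kept for free(), which is
 * what the removed tcb_addr field did.
 */
#include <stdint.h>
#include <stdlib.h>

#define	ALIGN	64

static void *
alloc_aligned(size_t size, void **real_addr)
{
	void *addr;

	addr = malloc(size + ALIGN - 1);
	if (addr == NULL)
		return (NULL);
	*real_addr = addr;
	return ((void *)(((uintptr_t)addr + ALIGN - 1) &
	    ~(uintptr_t)(ALIGN - 1)));
}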
Index: head/lib/libkse/arch/arm/include/atomic_ops.h
===================================================================
--- head/lib/libkse/arch/arm/include/atomic_ops.h	(revision 137282)
+++ head/lib/libkse/arch/arm/include/atomic_ops.h	(revision 137283)
@@ -1,51 +1,68 @@
 /*-
  * Copyright (c) 2001 Daniel Eischen
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Neither the name of the author nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #ifndef _ATOMIC_OPS_H_
 #define	_ATOMIC_OPS_H_
 
+#include
+#include "thr_private.h"
+
 /*
  * Atomic swap:
  *	Atomic (tmp = *dst, *dst = val), then *res = tmp
  *
  *	void atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res);
  */
 static inline void
 atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res)
 {
-	__asm __volatile(
-	    "swp %2, %2, [%1]; mov %2, %0"
-	    : "=r" (*res) : "r" (dst), "r" (val) : "cc");
+	*res = __swp(val, dst);
 }
 
 #define	atomic_swap_ptr(d, v, r) \
 	atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
 
 #define	atomic_swap_int(d, v, r) \
 	atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
 #endif
+
+static inline u_int32_t
+atomic_cmpset_32(volatile u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
+{
+	kse_critical_t crit = _kse_critical_enter();
+	int ret;
+
+	if (*p == cmpval) {
+		*p = newval;
+		ret = 1;
+	} else
+		ret = 0;
+	_kse_critical_leave(crit);
+	return (ret);
+}
+
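The new atomic_cmpset_32() emulates compare-and-set on a CPU family whose pre-ARMv6 members only provide an atomic exchange (swp): the compare and the conditional store are bracketed by _kse_critical_enter()/_kse_critical_leave(), declared in thr_private.h which the header now includes, so an upcall cannot interleave with the read-modify-write on the same KSE. Below is a usage sketch with invented names (lock_word, try_lock), assuming the header above is in scope.

#include <sys/types.h>		/* u_int32_t */
#include "atomic_ops.h"		/* the header shown above */

static volatile u_int32_t lock_word = 0;

/* Returns 1 only for the caller that flips lock_word from 0 to 1. */
static int
try_lock(void)
{
	return (atomic_cmpset_32(&lock_word, 0, 1));
}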
Index: head/lib/libkse/arch/arm/include/pthread_md.h
===================================================================
--- head/lib/libkse/arch/arm/include/pthread_md.h	(revision 137282)
+++ head/lib/libkse/arch/arm/include/pthread_md.h	(revision 137283)
@@ -1,254 +1,256 @@
 /*-
  * Copyright (c) 2003 Jake Burkholder.
  * Copyright (c) 2003 Marcel Moolenaar
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 /*
  * Machine-dependent thread prototypes/definitions for the thread kernel.
  */
 
 #ifndef _PTHREAD_MD_H_
 #define	_PTHREAD_MD_H_
 
 #include
 #include
 #include
 
 #define	KSE_STACKSIZE		16384
 #define	DTV_OFFSET		offsetof(struct tcb, tcb_tp.tp_tdv)
 
 int _thr_setcontext(mcontext_t *, intptr_t, intptr_t *);
 int _thr_getcontext(mcontext_t *);
 
 #define	THR_GETCONTEXT(ucp)	_thr_getcontext(&(ucp)->uc_mcontext)
 #define	THR_SETCONTEXT(ucp)	_thr_setcontext(&(ucp)->uc_mcontext, 0, NULL)
 
 #define	PER_THREAD
 
 struct kcb;
 struct kse;
 struct pthread;
 struct tcb;
 struct tdv;	/* We don't know what this is yet? */
 
 /*
  * %r6 points to one of these. We define the static TLS as an array
  * of long double to enforce 16-byte alignment of the TLS memory.
  *
  * XXX - Both static and dynamic allocation of any of these structures
  *       will result in a valid, well-aligned thread pointer???
  */
 struct arm_tp {
 	struct tdv	*tp_tdv;	/* dynamic TLS */
-	uint32_t	_reserved_;
-	long double	tp_tls[0];	/* static TLS */
 };
 
 struct tcb {
 	struct pthread		*tcb_thread;
-	void			*tcb_addr;	/* allocated tcb address */
 	struct kcb		*tcb_curkcb;
 	uint32_t		tcb_isfake;
-	uint32_t		tcb_spare[4];
 	struct kse_thr_mailbox	tcb_tmbx;	/* needs 32-byte alignment */
 	struct arm_tp		tcb_tp;
-} __aligned(32);
+};
 
 struct kcb {
 	struct kse_mailbox	kcb_kmbx;
 	struct tcb		kcb_faketcb;
 	struct tcb		*kcb_curtcb;
 	struct kse		*kcb_kse;
 };
 
-register struct arm_tp *_tp __asm("%r6");
+extern struct arm_tp *_tp;
 
 #define	_tcb	((struct tcb*)((char*)(_tp) - offsetof(struct tcb, tcb_tp)))
 
 /*
  * The kcb and tcb constructors.
  */
 struct tcb *_tcb_ctor(struct pthread *, int);
 void _tcb_dtor(struct tcb *);
 struct kcb *_kcb_ctor(struct kse *kse);
 void _kcb_dtor(struct kcb *);
 
+static __inline uint32_t
+__kcb_swp(uint32_t val, void *ptr)
+{
+
+	__asm __volatile("swp %0, %1, [%2]"
+	    : "=r" (val) : "r" (val) , "r" (ptr) : "memory");
+	return (val);
+}
+
 /* Called from the KSE to set its private data. */
 static __inline void
 _kcb_set(struct kcb *kcb)
 {
 	/* There is no thread yet; use the fake tcb. */
-	_tp = &kcb->kcb_faketcb.tcb_tp;
+	__kcb_swp((uint32_t)&kcb->kcb_faketcb.tcb_tp, &_tp);
 }
 
 /*
  * Get the current kcb.
  *
  * This can only be called while in a critical region; don't
  * worry about having the kcb changed out from under us.
  */
 static __inline struct kcb *
 _kcb_get(void)
 {
 	return (_tcb->tcb_curkcb);
 }
 
 /*
  * Enter a critical region.
  *
  * Read and clear km_curthread in the kse mailbox.
  */
 static __inline struct kse_thr_mailbox *
 _kcb_critical_enter(void)
 {
 	struct kse_thr_mailbox *crit;
-	uint32_t flags;
 
-	if (_tcb->tcb_isfake != 0) {
-		/*
-		 * We already are in a critical region since
-		 * there is no current thread.
-		 */
-		crit = NULL;
-	} else {
-		flags = _tcb->tcb_tmbx.tm_flags;
-		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
-		crit = _tcb->tcb_curkcb->kcb_kmbx.km_curthread;
-		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
-		_tcb->tcb_tmbx.tm_flags = flags;
-	}
+	if (_tcb->tcb_isfake)
+		return (NULL);
+	crit = (struct kse_thr_mailbox *)__kcb_swp((uint32_t)NULL,
+	    &_tcb->tcb_curkcb->kcb_kmbx.km_curthread);
 	return (crit);
 }
 
 static __inline void
 _kcb_critical_leave(struct kse_thr_mailbox *crit)
 {
-	/* No need to do anything if this is a fake tcb. */
+
 	if (_tcb->tcb_isfake == 0)
-		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;
+		__kcb_swp((uint32_t)crit,
+		    &_tcb->tcb_curkcb->kcb_kmbx.km_curthread);
 }
 
 static __inline int
 _kcb_in_critical(void)
 {
 	uint32_t flags;
 	int ret;
 
+	return (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
 	if (_tcb->tcb_isfake != 0) {
 		/*
 		 * We are in a critical region since there is no
 		 * current thread.
 		 */
 		ret = 1;
 	} else {
 		flags = _tcb->tcb_tmbx.tm_flags;
 		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
 		ret = (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
 		_tcb->tcb_tmbx.tm_flags = flags;
 	}
 	return (ret);
 }
 
 static __inline void
 _tcb_set(struct kcb *kcb, struct tcb *tcb)
 {
-	if (tcb == NULL)
+	if (tcb == NULL)
 		tcb = &kcb->kcb_faketcb;
+	__kcb_swp((uint32_t)&tcb->tcb_tp, &_tp);
 	kcb->kcb_curtcb = tcb;
 	tcb->tcb_curkcb = kcb;
-	_tp = &tcb->tcb_tp;
 }
 
 static __inline struct tcb *
 _tcb_get(void)
 {
 	return (_tcb);
 }
 
 static __inline struct pthread *
 _get_curthread(void)
 {
 	return (_tcb->tcb_thread);
 }
 
 /*
  * Get the current kse.
  *
  * Like _kcb_get(), this can only be called while in a critical region.
  */
 static __inline struct kse *
 _get_curkse(void)
 {
 	return (_tcb->tcb_curkcb->kcb_kse);
 }
 
-void _arm_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
+void _arm_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
     size_t stacksz);
 
 static __inline int
 _thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
 {
-	if (_thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
-		/* Make the fake tcb the current thread. */
+	int ret;
+
+	if ((ret = _thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext))
+	    == 0) {
 		kcb->kcb_curtcb = &kcb->kcb_faketcb;
-		_tp = &kcb->kcb_faketcb.tcb_tp;
-		_arm_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx,
+		__kcb_swp((int)&kcb->kcb_faketcb.tcb_tp, &_tp);
+		_arm_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
 		    kcb->kcb_kmbx.km_stack.ss_sp,
 		    kcb->kcb_kmbx.km_stack.ss_size);
 		/* We should not reach here. */
 		return (-1);
-	}
+	} else if (ret < 0)
+		return (-1);
 	return (0);
 }
 
 static __inline int
 _thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
 {
 	extern int _libkse_debug;
 	mcontext_t *mc;
 
+	if (!tcb || !kcb)
+		return (-1);
 	_tcb_set(kcb, tcb);
 	mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
-	if (_libkse_debug == 0) {
+	if (0 && _libkse_debug == 0) {
 		tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
 		if (setmbox)
 			_thr_setcontext(mc, (intptr_t)&tcb->tcb_tmbx,
 			    (intptr_t *)&kcb->kcb_kmbx.km_curthread);
 		else
 			_thr_setcontext(mc, 0, NULL);
 	} else {
 		if (setmbox)
 			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
 		else
 			kse_switchin(&tcb->tcb_tmbx, 0);
 	}
 
 	/* We should not reach here. */
 	return (-1);
 }
 #endif /* _PTHREAD_MD_H_ */
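The thread pointer moves out of the reserved register (%r6) and into an ordinary global, _tp, and every update now goes through __kcb_swp(), whose single swp instruction exchanges the new value with the old one atomically. The current tcb is then recovered from _tp by the _tcb macro, which steps back from the embedded tcb_tp member to the enclosing structure. The sketch below spells out that container-of arithmetic; the structures are trimmed stand-ins for the ones in the header and tp_to_tcb() is an invented name.

#include <stddef.h>

/* Minimal stand-ins for the structures declared in pthread_md.h. */
struct arm_tp {
	void	*tp_tdv;
};

struct tcb {
	void		*tcb_thread;
	void		*tcb_curkcb;
	unsigned int	tcb_isfake;
	/* ... mailbox omitted ... */
	struct arm_tp	tcb_tp;
};

static inline struct tcb *
tp_to_tcb(struct arm_tp *tp)
{
	/* Same arithmetic as the _tcb macro: container-of on tcb_tp. */
	return ((struct tcb *)((char *)tp - offsetof(struct tcb, tcb_tp)));
}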
Index: head/lib/libpthread/arch/arm/arm/context.S
===================================================================
--- head/lib/libpthread/arch/arm/arm/context.S	(revision 137282)
+++ head/lib/libpthread/arch/arm/arm/context.S	(revision 137283)
@@ -1,81 +1,76 @@
 /*
  * Copyright (c) Olivier Houchard
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Neither the name of the author nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
 /*
  * int thr_setcontext(mcontext_t *mcp, intptr_t val, intptr_t *loc)
  *
  * Restores the context in mcp.
  *
  * Returns 0 if there are no errors; -1 otherwise
  */
 .weak	_C_LABEL(_thr_setcontext)
 .set	_C_LABEL(_thr_setcontext), _C_LABEL(__thr_setcontext)
 ENTRY(__thr_setcontext)
 	/* Check for NULL pointer. */
 	cmp	r0, #0
 	moveq	r0, #-1
 	moveq	pc, lr
-	add	ip, r0, #8
-	ldmia	ip, {r2-r12}
 	cmp	r2, #0
-	str	r1, [r2]
-	add	ip, r0, #4
-	str	ip, [r1]	/* Restore r1. */
-	add	ip, r0, #64
-	msr	cpsr, ip
-	add	ip, r0, #52
-	mov	r0, #0		/* Return 0. */
-	ldr	sp, [ip]	/* Restore stack pointer. */
-	mov	pc, lr		/* Return. */
+	strne	r1, [r2]
+	ldmia	r0, {r0-r15}	/* XXX: FP bits ? */
 
 /*
  * int thr_getcontext(mcontext_t *mcp);
  *
  * Returns -1 if there is an error, 0 no errors; 1 upon return
  * from a setcontext().
  */
 .weak	_C_LABEL(_thr_getcontext)
 .set	_C_LABEL(_thr_getcontext), _C_LABEL(__thr_getcontext)
 ENTRY(__thr_getcontext)
 	/* Check for NULL pointer. */
 	cmp	r0, #0
 	moveq	r0, #-1
 	moveq	pc, lr
 	stmia	r0, {r0-r13}
+	mov	r1, #1
+	str	r1, [r0]		/* Return 1 from setcontext */
+	str	lr, [r0, #(15 * 4)]	/* PC */
+	mrs	r1, cpsr
+	str	r1, [r0, #(16 * 4)]	/* CPSR */
 	mov	r0, #0			/* Return 0. */
 	mov	pc, lr
 
 ENTRY(_arm_enter_uts)
-	add	r4, r2, r3	/* Stack addr + size. */
-	mov	lr, pc
+	add	sp, r2, r3	/* Stack addr + size. */
 	mov	pc, r1
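_arm_enter_uts() now takes the mailbox first and the UTS entry point second, which lines the arguments up with the AAPCS argument registers: km arrives in r0 (exactly where the kse_func_t expects its only argument), uts in r1, and the stack base and size in r2/r3, so the two-instruction trampoline above only has to set sp = stack + size and jump through r1 while leaving r0 untouched. The annotated prototype below restates that register mapping; kse_func_t and struct kse_mailbox come from the FreeBSD KSE headers, and the comments are explanatory only.

#include <sys/types.h>
#include <sys/kse.h>		/* struct kse_mailbox, kse_func_t */

void	_arm_enter_uts(struct kse_mailbox *km,	/* r0: handed to the UTS as-is */
	    kse_func_t uts,			/* r1: jump target */
	    void *stack,			/* r2: stack base */
	    size_t stacksz);			/* r3: size; sp becomes r2 + r3 */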
Index: head/lib/libpthread/arch/arm/arm/pthread_md.c
===================================================================
--- head/lib/libpthread/arch/arm/arm/pthread_md.c	(revision 137282)
+++ head/lib/libpthread/arch/arm/arm/pthread_md.c	(revision 137283)
@@ -1,91 +1,84 @@
 /*-
  * Copyright (C) 2003 Jake Burkholder
  * Copyright (C) 2003 David Xu
  * Copyright (c) 2001,2003 Daniel Eischen
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Neither the name of the author nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include
 #include
 #include
 #include
 #include
 #include
 
 #include "pthread_md.h"
 
+struct arm_tp *_tp = NULL;
+
 struct tcb *
 _tcb_ctor(struct pthread *thread, int initial)
 {
 	struct tcb *tcb;
-	void *addr;
 
-	addr = malloc(sizeof(struct tcb) + 63);
-	if (addr == NULL)
-		tcb = NULL;
-	else {
-		tcb = (struct tcb *)(((uintptr_t)(addr) + 63) & ~63);
+	if ((tcb = malloc(sizeof(struct tcb)))) {
 		bzero(tcb, sizeof(struct tcb));
-		tcb->tcb_addr = addr;
 		tcb->tcb_thread = thread;
 		/* XXX - Allocate tdv/tls */
 	}
 	return (tcb);
 }
 
 void
 _tcb_dtor(struct tcb *tcb)
 {
-	void *addr;
 
-	addr = tcb->tcb_addr;
-	tcb->tcb_addr = NULL;
-	free(addr);
+	free(tcb);
 }
 
 struct kcb *
 _kcb_ctor(struct kse *kse)
 {
 	struct kcb *kcb;
 
 	kcb = malloc(sizeof(struct kcb));
 	if (kcb != NULL) {
 		bzero(kcb, sizeof(struct kcb));
 		kcb->kcb_faketcb.tcb_isfake = 1;
 		kcb->kcb_faketcb.tcb_tmbx.tm_flags = TMF_NOUPCALL;
 		kcb->kcb_curtcb = &kcb->kcb_faketcb;
 		kcb->kcb_kse = kse;
 	}
 	return (kcb);
 }
 
 void
 _kcb_dtor(struct kcb *kcb)
 {
 	free(kcb);
 }
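The unchanged constructors above show the bootstrap trick the rest of the port relies on: each kcb embeds a fake tcb whose tcb_isfake flag and TMF_NOUPCALL mailbox flag mark "no user thread is running here", and kcb_curtcb starts out pointing at it until _tcb_set() installs a real thread. The sketch below is only a plausible illustration of the intended calling order; bring_up_kse() is an invented name and the real sequencing lives in the machine-independent libkse code.

#include "pthread_md.h"		/* the MD definitions being patched here */

static int
bring_up_kse(struct kse *kse, struct pthread *thr)
{
	struct kcb *kcb;
	struct tcb *tcb;

	if ((kcb = _kcb_ctor(kse)) == NULL)
		return (-1);
	_kcb_set(kcb);			/* run on the embedded fake tcb */
	if ((tcb = _tcb_ctor(thr, 0)) == NULL) {
		_kcb_dtor(kcb);
		return (-1);
	}
	_tcb_set(kcb, tcb);		/* point _tp at the real thread */
	return (0);
}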
Index: head/lib/libpthread/arch/arm/include/atomic_ops.h
===================================================================
--- head/lib/libpthread/arch/arm/include/atomic_ops.h	(revision 137282)
+++ head/lib/libpthread/arch/arm/include/atomic_ops.h	(revision 137283)
@@ -1,51 +1,68 @@
 /*-
  * Copyright (c) 2001 Daniel Eischen
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Neither the name of the author nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #ifndef _ATOMIC_OPS_H_
 #define	_ATOMIC_OPS_H_
 
+#include
+#include "thr_private.h"
+
 /*
  * Atomic swap:
  *	Atomic (tmp = *dst, *dst = val), then *res = tmp
  *
  *	void atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res);
  */
 static inline void
 atomic_swap32(intptr_t *dst, intptr_t val, intptr_t *res)
 {
-	__asm __volatile(
-	    "swp %2, %2, [%1]; mov %2, %0"
-	    : "=r" (*res) : "r" (dst), "r" (val) : "cc");
+	*res = __swp(val, dst);
 }
 
 #define	atomic_swap_ptr(d, v, r) \
 	atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
 
 #define	atomic_swap_int(d, v, r) \
 	atomic_swap32((intptr_t *)d, (intptr_t)v, (intptr_t *)r)
 #endif
+
+static inline u_int32_t
+atomic_cmpset_32(volatile u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
+{
+	kse_critical_t crit = _kse_critical_enter();
+	int ret;
+
+	if (*p == cmpval) {
+		*p = newval;
+		ret = 1;
+	} else
+		ret = 0;
+	_kse_critical_leave(crit);
+	return (ret);
+}
+
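atomic_swap32() drops its open-coded inline assembly in favour of __swp(), a wrapper around the same swp instruction: one instruction that loads the old word and stores the new one as an indivisible operation and returns the previous contents. The plain-C model below states those semantics; it is not atomic itself (the atomicity comes from the single SWP instruction) and swap_word_model() is an invented name.

#include <stdint.h>

/* Semantic model only: what the SWP-based swap accomplishes. */
static inline intptr_t
swap_word_model(intptr_t *dst, intptr_t val)
{
	intptr_t old = *dst;	/* load the previous contents...          */
	*dst = val;		/* ...and store the new value; SWP does
				   both in one atomic step               */
	return (old);		/* previous contents, as __swp returns    */
}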
Index: head/lib/libpthread/arch/arm/include/pthread_md.h
===================================================================
--- head/lib/libpthread/arch/arm/include/pthread_md.h	(revision 137282)
+++ head/lib/libpthread/arch/arm/include/pthread_md.h	(revision 137283)
@@ -1,254 +1,256 @@
 /*-
  * Copyright (c) 2003 Jake Burkholder.
  * Copyright (c) 2003 Marcel Moolenaar
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 /*
  * Machine-dependent thread prototypes/definitions for the thread kernel.
  */
 
 #ifndef _PTHREAD_MD_H_
 #define	_PTHREAD_MD_H_
 
 #include
 #include
 #include
 
 #define	KSE_STACKSIZE		16384
 #define	DTV_OFFSET		offsetof(struct tcb, tcb_tp.tp_tdv)
 
 int _thr_setcontext(mcontext_t *, intptr_t, intptr_t *);
 int _thr_getcontext(mcontext_t *);
 
 #define	THR_GETCONTEXT(ucp)	_thr_getcontext(&(ucp)->uc_mcontext)
 #define	THR_SETCONTEXT(ucp)	_thr_setcontext(&(ucp)->uc_mcontext, 0, NULL)
 
 #define	PER_THREAD
 
 struct kcb;
 struct kse;
 struct pthread;
 struct tcb;
 struct tdv;	/* We don't know what this is yet? */
 
 /*
  * %r6 points to one of these. We define the static TLS as an array
  * of long double to enforce 16-byte alignment of the TLS memory.
  *
  * XXX - Both static and dynamic allocation of any of these structures
  *       will result in a valid, well-aligned thread pointer???
  */
 struct arm_tp {
 	struct tdv	*tp_tdv;	/* dynamic TLS */
-	uint32_t	_reserved_;
-	long double	tp_tls[0];	/* static TLS */
 };
 
 struct tcb {
 	struct pthread		*tcb_thread;
-	void			*tcb_addr;	/* allocated tcb address */
 	struct kcb		*tcb_curkcb;
 	uint32_t		tcb_isfake;
-	uint32_t		tcb_spare[4];
 	struct kse_thr_mailbox	tcb_tmbx;	/* needs 32-byte alignment */
 	struct arm_tp		tcb_tp;
-} __aligned(32);
+};
 
 struct kcb {
 	struct kse_mailbox	kcb_kmbx;
 	struct tcb		kcb_faketcb;
 	struct tcb		*kcb_curtcb;
 	struct kse		*kcb_kse;
 };
 
-register struct arm_tp *_tp __asm("%r6");
+extern struct arm_tp *_tp;
 
 #define	_tcb	((struct tcb*)((char*)(_tp) - offsetof(struct tcb, tcb_tp)))
 
 /*
  * The kcb and tcb constructors.
  */
 struct tcb *_tcb_ctor(struct pthread *, int);
 void _tcb_dtor(struct tcb *);
 struct kcb *_kcb_ctor(struct kse *kse);
 void _kcb_dtor(struct kcb *);
 
+static __inline uint32_t
+__kcb_swp(uint32_t val, void *ptr)
+{
+
+	__asm __volatile("swp %0, %1, [%2]"
+	    : "=r" (val) : "r" (val) , "r" (ptr) : "memory");
+	return (val);
+}
+
 /* Called from the KSE to set its private data. */
 static __inline void
 _kcb_set(struct kcb *kcb)
 {
 	/* There is no thread yet; use the fake tcb. */
-	_tp = &kcb->kcb_faketcb.tcb_tp;
+	__kcb_swp((uint32_t)&kcb->kcb_faketcb.tcb_tp, &_tp);
 }
 
 /*
  * Get the current kcb.
  *
  * This can only be called while in a critical region; don't
  * worry about having the kcb changed out from under us.
  */
 static __inline struct kcb *
 _kcb_get(void)
 {
 	return (_tcb->tcb_curkcb);
 }
 
 /*
  * Enter a critical region.
  *
  * Read and clear km_curthread in the kse mailbox.
  */
 static __inline struct kse_thr_mailbox *
 _kcb_critical_enter(void)
 {
 	struct kse_thr_mailbox *crit;
-	uint32_t flags;
 
-	if (_tcb->tcb_isfake != 0) {
-		/*
-		 * We already are in a critical region since
-		 * there is no current thread.
-		 */
-		crit = NULL;
-	} else {
-		flags = _tcb->tcb_tmbx.tm_flags;
-		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
-		crit = _tcb->tcb_curkcb->kcb_kmbx.km_curthread;
-		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
-		_tcb->tcb_tmbx.tm_flags = flags;
-	}
+	if (_tcb->tcb_isfake)
+		return (NULL);
+	crit = (struct kse_thr_mailbox *)__kcb_swp((uint32_t)NULL,
+	    &_tcb->tcb_curkcb->kcb_kmbx.km_curthread);
 	return (crit);
 }
 
 static __inline void
 _kcb_critical_leave(struct kse_thr_mailbox *crit)
 {
-	/* No need to do anything if this is a fake tcb. */
+
 	if (_tcb->tcb_isfake == 0)
-		_tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;
+		__kcb_swp((uint32_t)crit,
+		    &_tcb->tcb_curkcb->kcb_kmbx.km_curthread);
 }
 
 static __inline int
 _kcb_in_critical(void)
 {
 	uint32_t flags;
 	int ret;
 
+	return (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
 	if (_tcb->tcb_isfake != 0) {
 		/*
 		 * We are in a critical region since there is no
 		 * current thread.
 		 */
 		ret = 1;
 	} else {
 		flags = _tcb->tcb_tmbx.tm_flags;
 		_tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
 		ret = (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
 		_tcb->tcb_tmbx.tm_flags = flags;
 	}
 	return (ret);
 }
 
 static __inline void
 _tcb_set(struct kcb *kcb, struct tcb *tcb)
 {
-	if (tcb == NULL)
+	if (tcb == NULL)
 		tcb = &kcb->kcb_faketcb;
+	__kcb_swp((uint32_t)&tcb->tcb_tp, &_tp);
 	kcb->kcb_curtcb = tcb;
 	tcb->tcb_curkcb = kcb;
-	_tp = &tcb->tcb_tp;
 }
 
 static __inline struct tcb *
 _tcb_get(void)
 {
 	return (_tcb);
 }
 
 static __inline struct pthread *
 _get_curthread(void)
 {
 	return (_tcb->tcb_thread);
 }
 
 /*
  * Get the current kse.
  *
  * Like _kcb_get(), this can only be called while in a critical region.
  */
 static __inline struct kse *
 _get_curkse(void)
 {
 	return (_tcb->tcb_curkcb->kcb_kse);
 }
 
-void _arm_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
+void _arm_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
     size_t stacksz);
 
 static __inline int
 _thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
 {
-	if (_thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
-		/* Make the fake tcb the current thread. */
+	int ret;
+
+	if ((ret = _thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext))
+	    == 0) {
 		kcb->kcb_curtcb = &kcb->kcb_faketcb;
-		_tp = &kcb->kcb_faketcb.tcb_tp;
-		_arm_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx,
+		__kcb_swp((int)&kcb->kcb_faketcb.tcb_tp, &_tp);
+		_arm_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
 		    kcb->kcb_kmbx.km_stack.ss_sp,
 		    kcb->kcb_kmbx.km_stack.ss_size);
 		/* We should not reach here. */
 		return (-1);
-	}
+	} else if (ret < 0)
+		return (-1);
 	return (0);
 }
 
 static __inline int
 _thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
 {
 	extern int _libkse_debug;
 	mcontext_t *mc;
 
+	if (!tcb || !kcb)
+		return (-1);
 	_tcb_set(kcb, tcb);
 	mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
-	if (_libkse_debug == 0) {
+	if (0 && _libkse_debug == 0) {
 		tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
 		if (setmbox)
 			_thr_setcontext(mc, (intptr_t)&tcb->tcb_tmbx,
 			    (intptr_t *)&kcb->kcb_kmbx.km_curthread);
 		else
 			_thr_setcontext(mc, 0, NULL);
 	} else {
 		if (setmbox)
 			kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
 		else
 			kse_switchin(&tcb->tcb_tmbx, 0);
 	}
 
 	/* We should not reach here. */
 	return (-1);
 }
 #endif /* _PTHREAD_MD_H_ */
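Taken together, the inline routines give the switch path its shape: _thread_enter_uts() saves the caller's context, and because _thr_getcontext() returns 0 on that first pass it flips _tp to the fake tcb and calls _arm_enter_uts(), which does not come back. Only when the UTS later resumes the saved context through _thread_switch(), where the if (0 && _libkse_debug == 0) test currently forces the kse_switchin() path, does the same call return the 1 planted in the saved r0, and _thread_enter_uts() then returns 0. The skeleton below restates that control flow from a caller's point of view; yield_to_uts() is an invented name and the real callers live in the machine-independent libkse scheduler.

#include "pthread_md.h"		/* the MD definitions being patched here */

static void
yield_to_uts(struct tcb *self, struct kcb *kcb)
{
	/*
	 * On the first pass _thr_getcontext() yields 0, so the inline
	 * switches to the fake tcb and jumps into the UTS without
	 * returning here.  The next time this context runs, the saved
	 * r0 of 1 makes the same call return non-zero and
	 * _thread_enter_uts() finally returns 0; -1 means the context
	 * could not be saved at all.
	 */
	if (_thread_enter_uts(self, kcb) != 0)
		return;		/* could not save the context */

	/* Reached only after the UTS switched back to this thread. */
}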