diff --git a/lib/libthr/arch/aarch64/include/pthread_md.h b/lib/libthr/arch/aarch64/include/pthread_md.h
index d14bce244f8..121bac7eb12 100644
--- a/lib/libthr/arch/aarch64/include/pthread_md.h
+++ b/lib/libthr/arch/aarch64/include/pthread_md.h
@@ -23,41 +23,44 @@
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 /*
  * Machine-dependent thread prototypes/definitions.
  */
 #ifndef _PTHREAD_MD_H_
 #define _PTHREAD_MD_H_
 
 #include
 #include
 #include
 
+#define CPU_SPINWAIT_ENTER
 #define CPU_SPINWAIT
+#define CPU_SPINWAIT_EXIT
+
 #define DTV_OFFSET		offsetof(struct tcb, tcb_dtv)
 
 /*
  * Variant I tcb. The structure layout is fixed, don't blindly
  * change it.
  */
 struct tcb {
 	void		*tcb_dtv;
 	struct pthread	*tcb_thread;
 };
 
 /* Called from the thread to set its private data. */
 static __inline void
 _tcb_set(struct tcb *tcb)
 {
 	__asm __volatile("msr tpidr_el0, %x0" :: "r" (tcb));
 }
 
 /*
diff --git a/lib/libthr/arch/amd64/include/pthread_md.h b/lib/libthr/arch/amd64/include/pthread_md.h
index 1d393d9cbbe..4ff7b3360bd 100644
--- a/lib/libthr/arch/amd64/include/pthread_md.h
+++ b/lib/libthr/arch/amd64/include/pthread_md.h
@@ -22,41 +22,43 @@
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 /*
  * Machine-dependent thread prototypes/definitions.
  */
 #ifndef _PTHREAD_MD_H_
 #define _PTHREAD_MD_H_
 
 #include
 #include
 #include
 
+#define CPU_SPINWAIT_ENTER
 #define CPU_SPINWAIT		__asm __volatile("pause")
+#define CPU_SPINWAIT_EXIT
 
 #define DTV_OFFSET		offsetof(struct tcb, tcb_dtv)
 
 /*
  * Variant II tcb, first two members are required by rtld,
  * %fs points to the structure.
  */
 struct tcb {
 	struct tcb		*tcb_self;	/* required by rtld */
 	void			*tcb_dtv;	/* required by rtld */
 	struct pthread		*tcb_thread;
 	void			*tcb_spare[1];
 };
 
 /*
  * Evaluates to the byte offset of the per-tcb variable name.
  */
 #define	__tcb_offset(name)	__offsetof(struct tcb, name)
 
 /*
diff --git a/lib/libthr/arch/arm/include/pthread_md.h b/lib/libthr/arch/arm/include/pthread_md.h
index 55d93d0853b..06a2e5d23ee 100644
--- a/lib/libthr/arch/arm/include/pthread_md.h
+++ b/lib/libthr/arch/arm/include/pthread_md.h
@@ -21,41 +21,44 @@
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 /*
  * Machine-dependent thread prototypes/definitions.
  */
 #ifndef _PTHREAD_MD_H_
 #define _PTHREAD_MD_H_
 
 #include
 #include
 #include
 
+#define CPU_SPINWAIT_ENTER
 #define CPU_SPINWAIT
+#define CPU_SPINWAIT_EXIT
+
 #define DTV_OFFSET		offsetof(struct tcb, tcb_dtv)
 
 /*
  * Variant I tcb. The structure layout is fixed, don't blindly
  * change it.
  */
 struct tcb {
 	void		*tcb_dtv;	/* required by rtld */
 	struct pthread	*tcb_thread;	/* our hook */
 };
 
 /* Called from the thread to set its private data. */
 static __inline void
 _tcb_set(struct tcb *tcb)
 {
 #ifdef ARM_TP_ADDRESS
 	*((struct tcb **)ARM_TP_ADDRESS) = tcb;	/* avoids a system call */
 #else
 	sysarch(ARM_SET_TP, tcb);
 #endif
diff --git a/lib/libthr/arch/i386/include/pthread_md.h b/lib/libthr/arch/i386/include/pthread_md.h
index 7de8aa56203..6342b53250d 100644
--- a/lib/libthr/arch/i386/include/pthread_md.h
+++ b/lib/libthr/arch/i386/include/pthread_md.h
@@ -22,41 +22,43 @@
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 /*
  * Machine-dependent thread prototypes/definitions.
  */
 #ifndef _PTHREAD_MD_H_
 #define _PTHREAD_MD_H_
 
 #include
 #include
 #include
 
+#define CPU_SPINWAIT_ENTER
 #define CPU_SPINWAIT		__asm __volatile("pause")
+#define CPU_SPINWAIT_EXIT
 
 #define DTV_OFFSET		offsetof(struct tcb, tcb_dtv)
 
 /*
  * Variant II tcb, first two members are required by rtld,
  * %gs points to the structure.
  */
 struct tcb {
 	struct tcb		*tcb_self;	/* required by rtld */
 	void			*tcb_dtv;	/* required by rtld */
 	struct pthread		*tcb_thread;
 };
 
 /*
  * Evaluates to the byte offset of the per-tcb variable name.
  */
 #define	__tcb_offset(name)	__offsetof(struct tcb, name)
 
 /*
  * Evaluates to the type of the per-tcb variable name.
diff --git a/lib/libthr/arch/mips/include/pthread_md.h b/lib/libthr/arch/mips/include/pthread_md.h
index 20b0e706559..347c1be1b7d 100644
--- a/lib/libthr/arch/mips/include/pthread_md.h
+++ b/lib/libthr/arch/mips/include/pthread_md.h
@@ -23,41 +23,44 @@
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * from: src/lib/libthr/arch/arm/include/pthread_md.h,v 1.3 2005/10/29 13:40:31 davidxu
  * $FreeBSD$
  */
 
 /*
  * Machine-dependent thread prototypes/definitions.
  */
 #ifndef _PTHREAD_MD_H_
 #define _PTHREAD_MD_H_
 
 #include
 #include
 #include
 #include
 
+#define CPU_SPINWAIT_ENTER
 #define CPU_SPINWAIT
+#define CPU_SPINWAIT_EXIT
+
 #define DTV_OFFSET		offsetof(struct tcb, tcb_dtv)
 
 /*
  * Variant I tcb. The structure layout is fixed, don't blindly
  * change it!
  */
 struct tcb {
 	void		*tcb_dtv;
 	struct pthread	*tcb_thread;
 };
 
 /* Called from the thread to set its private data. */
 static __inline void
 _tcb_set(struct tcb *tcb)
 {
 	sysarch(MIPS_SET_TLS, tcb);
 }
 
 /*
diff --git a/lib/libthr/arch/powerpc/include/pthread_md.h b/lib/libthr/arch/powerpc/include/pthread_md.h
index a9923d7337d..736c27404a0 100644
--- a/lib/libthr/arch/powerpc/include/pthread_md.h
+++ b/lib/libthr/arch/powerpc/include/pthread_md.h
@@ -21,41 +21,49 @@
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 /*
  * Machine-dependent thread prototypes/definitions.
  */
 #ifndef _PTHREAD_MD_H_
 #define _PTHREAD_MD_H_
 
 #include
 #include
 
+#ifdef __powerpc64__
+#define CPU_SPINWAIT_ENTER	__asm __volatile("or 31,31,31")
 #define CPU_SPINWAIT
+#define CPU_SPINWAIT_EXIT	__asm __volatile("or 2,2,2")
+#else
+#define CPU_SPINWAIT_ENTER
+#define CPU_SPINWAIT		__asm __volatile("or 27,27,27")
+#define CPU_SPINWAIT_EXIT
+#endif
 
 #define DTV_OFFSET		offsetof(struct tcb, tcb_dtv)
 
 #ifdef __powerpc64__
 #define TP_OFFSET		0x7010
 #else
 #define TP_OFFSET		0x7008
 #endif
 
 /*
  * Variant I tcb. The structure layout is fixed, don't blindly
  * change it.
  * %r2 (32-bit) or %r13 (64-bit) points to end of the structure.
  */
 struct tcb {
 	void		*tcb_dtv;
 	struct pthread	*tcb_thread;
 };
 
 static __inline void
 _tcb_set(struct tcb *tcb)
diff --git a/lib/libthr/arch/riscv/include/pthread_md.h b/lib/libthr/arch/riscv/include/pthread_md.h
index 499da5f5a8f..b1255cd6ac6 100644
--- a/lib/libthr/arch/riscv/include/pthread_md.h
+++ b/lib/libthr/arch/riscv/include/pthread_md.h
@@ -27,41 +27,44 @@
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 /*
  * Machine-dependent thread prototypes/definitions.
  */
 #ifndef _PTHREAD_MD_H_
 #define _PTHREAD_MD_H_
 
 #include
 #include
 
+#define CPU_SPINWAIT_ENTER
 #define CPU_SPINWAIT
+#define CPU_SPINWAIT_EXIT
+
 #define DTV_OFFSET		offsetof(struct tcb, tcb_dtv)
 
 #define TP_OFFSET		sizeof(struct tcb)
 
 /*
  * Variant I tcb. The structure layout is fixed, don't blindly
  * change it!
  */
 struct tcb {
 	void		*tcb_dtv;
 	struct pthread	*tcb_thread;
 };
 
 /* Called from the thread to set its private data. */
 static __inline void
 _tcb_set(struct tcb *tcb)
 {
 	__asm __volatile("mv tp, %0" :: "r"((uint8_t *)tcb + TP_OFFSET));
 }
diff --git a/lib/libthr/arch/sparc64/include/pthread_md.h b/lib/libthr/arch/sparc64/include/pthread_md.h
index 85c5b7e80b1..edb54bf9362 100644
--- a/lib/libthr/arch/sparc64/include/pthread_md.h
+++ b/lib/libthr/arch/sparc64/include/pthread_md.h
@@ -20,41 +20,43 @@
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 /*
  * Machine-dependent thread prototypes/definitions.
  */
 #ifndef _PTHREAD_MD_H_
 #define _PTHREAD_MD_H_
 
 #include
 
+#define CPU_SPINWAIT_ENTER
 #define CPU_SPINWAIT
+#define CPU_SPINWAIT_EXIT
 
 #define DTV_OFFSET		offsetof(struct tcb, tcb_dtv)
 
 /*
  * Variant II tcb, first two members are required by rtld.
  * %g7 points to the structure.
  */
 struct tcb {
 	struct tcb	*tcb_self;	/* required by rtld */
 	void		*tcb_dtv;	/* required by rtld */
 	struct pthread	*tcb_thread;	/* our hook */
 	void		*tcb_spare[1];
 };
 
 /* Called from the thread to set its private data. */
 static __inline void
 _tcb_set(struct tcb *tcb)
 {
 	__asm __volatile("mov %0, %%g7" : : "r" (tcb));
diff --git a/lib/libthr/thread/thr_mutex.c b/lib/libthr/thread/thr_mutex.c
index d2d9f5b54c1..c36c90aa24a 100644
--- a/lib/libthr/thread/thr_mutex.c
+++ b/lib/libthr/thread/thr_mutex.c
@@ -650,46 +650,49 @@ mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
 	/*
 	 * For adaptive mutexes, spin for a bit in the expectation
 	 * that if the application requests this mutex type then
 	 * the lock is likely to be released quickly and it is
 	 * faster than entering the kernel
 	 */
 	if (__predict_false((m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT |
 	    UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != 0))
 		goto sleep_in_kernel;
 
 	if (!_thr_is_smp)
 		goto yield_loop;
 
 	count = m->m_spinloops;
 	while (count--) {
 		owner = m->m_lock.m_owner;
 		if ((owner & ~UMUTEX_CONTESTED) == 0) {
 			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
 			    id | owner)) {
+				CPU_SPINWAIT_EXIT;
 				ret = 0;
 				goto done;
 			}
+			CPU_SPINWAIT_ENTER;
 		}
 		CPU_SPINWAIT;
 	}
+	CPU_SPINWAIT_EXIT;
 
 yield_loop:
 	count = m->m_yieldloops;
 	while (count--) {
 		_sched_yield();
 		owner = m->m_lock.m_owner;
 		if ((owner & ~UMUTEX_CONTESTED) == 0) {
 			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
 			    id | owner)) {
 				ret = 0;
 				goto done;
 			}
 		}
 	}
 
 sleep_in_kernel:
 	if (abstime == NULL)
 		ret = __thr_umutex_lock(&m->m_lock, id);
 	else if (__predict_false(abstime->tv_nsec < 0 ||
 	    abstime->tv_nsec >= 1000000000))
diff --git a/lib/libthr/thread/thr_pspinlock.c b/lib/libthr/thread/thr_pspinlock.c
index c71bdbb3f19..2cf8bea1091 100644
--- a/lib/libthr/thread/thr_pspinlock.c
+++ b/lib/libthr/thread/thr_pspinlock.c
@@ -109,47 +109,50 @@ _pthread_spin_trylock(pthread_spinlock_t *lock)
 		return (EINVAL);
 	return (THR_UMUTEX_TRYLOCK(_get_curthread(), &lck->s_lock));
 }
 
 int
 _pthread_spin_lock(pthread_spinlock_t *lock)
 {
 	struct pthread *curthread;
 	struct pthread_spinlock *lck;
 	int count;
 
 	if (lock == NULL)
 		return (EINVAL);
 	lck = *lock == THR_PSHARED_PTR ?
 	    __thr_pshared_offpage(lock, 0) : *lock;
 	if (lck == NULL)
 		return (EINVAL);
 
 	curthread = _get_curthread();
 	count = SPIN_COUNT;
 	while (THR_UMUTEX_TRYLOCK(curthread, &lck->s_lock) != 0) {
+		CPU_SPINWAIT_ENTER;
 		while (lck->s_lock.m_owner) {
 			if (!_thr_is_smp) {
 				_pthread_yield();
 			} else {
 				CPU_SPINWAIT;
 				if (--count <= 0) {
+					CPU_SPINWAIT_EXIT;
 					count = SPIN_COUNT;
 					_pthread_yield();
 				}
 			}
 		}
 	}
+	CPU_SPINWAIT_EXIT;
 	return (0);
 }
 
 int
 _pthread_spin_unlock(pthread_spinlock_t *lock)
 {
 	struct pthread_spinlock *lck;
 
 	if (lock == NULL)
 		return (EINVAL);
 	lck = *lock == THR_PSHARED_PTR ?
 	    __thr_pshared_offpage(lock, 0) : *lock;
 	if (lck == NULL)
 		return (EINVAL);
 	return (THR_UMUTEX_UNLOCK(_get_curthread(), &lck->s_lock));
 }
diff --git a/lib/libthr/thread/thr_umtx.c b/lib/libthr/thread/thr_umtx.c
index 86e5a1f1378..c795480eea1 100644
--- a/lib/libthr/thread/thr_umtx.c
+++ b/lib/libthr/thread/thr_umtx.c
@@ -85,50 +85,58 @@ __thr_umutex_lock(struct umutex *mtx, uint32_t id)
 #define SPINLOOPS	1000
 
 int
 __thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
 {
 	uint32_t owner;
 	int count;
 
 	if (!_thr_is_smp)
 		return (__thr_umutex_lock(mtx, id));
 	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
 		return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));
 
 	for (;;) {
 		count = SPINLOOPS;
 		while (count--) {
 			owner = mtx->m_owner;
 			if ((owner & ~UMUTEX_CONTESTED) == 0 &&
 			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
-			    id | owner))
+			    id | owner)) {
+				CPU_SPINWAIT_EXIT;
 				return (0);
+			}
 			if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
 			    atomic_cmpset_acq_32(&mtx->m_owner, owner,
-			    id | UMUTEX_CONTESTED))
+			    id | UMUTEX_CONTESTED)) {
+				CPU_SPINWAIT_EXIT;
 				return (EOWNERDEAD);
-			if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
+			}
+			if (__predict_false(owner == UMUTEX_RB_NOTRECOV)) {
+				CPU_SPINWAIT_EXIT;
 				return (ENOTRECOVERABLE);
+			}
+			CPU_SPINWAIT_ENTER;
 			CPU_SPINWAIT;
 		}
+		CPU_SPINWAIT_EXIT;
 
 		/* wait in kernel */
 		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
 	}
 }
 
 int
 __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
     const struct timespec *abstime)
 {
 	struct _umtx_time *tm_p, timeout;
 	size_t tm_size;
 	uint32_t owner;
 	int ret;
 
 	if (abstime == NULL) {
 		tm_p = NULL;
 		tm_size = 0;
 	} else {
 		timeout._clockid = CLOCK_REALTIME;
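
The patch itself carries no commentary, so the following is an illustrative sketch only, not libthr code. It shows the bracketing discipline the diff applies: CPU_SPINWAIT_ENTER once when a spin phase begins, CPU_SPINWAIT on each iteration, and CPU_SPINWAIT_EXIT on every path that leaves the spin phase (success, giving up, or falling back to the kernel). On powerpc64 the new macros expand to SMT thread-priority hint nops (drop priority while spinning, restore it afterwards); on every other architecture in this patch they are currently empty. The fallback defines, the toy lock word, and the try_acquire()/spin_acquire() helpers below are hypothetical stand-ins introduced only to keep the example self-contained.

/*
 * Illustrative sketch only -- not part of the patch.  The guarded defines
 * stand in for what lib/libthr/arch/<arch>/include/pthread_md.h provides;
 * the x86 "pause" is used here purely as an example body.
 */
#include <stdatomic.h>
#include <stdbool.h>

#ifndef CPU_SPINWAIT
#define CPU_SPINWAIT_ENTER			/* e.g. priority-drop hint nop */
#define CPU_SPINWAIT	__asm __volatile("pause")
#define CPU_SPINWAIT_EXIT			/* e.g. priority-restore hint nop */
#endif

#define SPINLOOPS	1000

/* Toy lock word: 0 == free, 1 == held. */
static bool
try_acquire(atomic_uint *lock)
{
	unsigned int expected = 0;

	return (atomic_compare_exchange_strong_explicit(lock, &expected, 1,
	    memory_order_acquire, memory_order_relaxed));
}

/*
 * Spin for a bounded number of iterations, then report failure so the
 * caller can block in the kernel, mirroring __thr_umutex_lock_spin().
 */
static bool
spin_acquire(atomic_uint *lock)
{
	int count;

	CPU_SPINWAIT_ENTER;			/* enter the spin phase once */
	for (count = SPINLOOPS; count > 0; count--) {
		if (try_acquire(lock)) {
			CPU_SPINWAIT_EXIT;	/* restore on the success path */
			return (true);
		}
		CPU_SPINWAIT;			/* per-iteration pause hint */
	}
	CPU_SPINWAIT_EXIT;			/* restore before giving up */
	return (false);
}

The placement in the diff follows the same rule as the sketch: the enter hint must be undone on every exit path, including the early returns inside the loop, so a thread does not keep running at reduced SMT priority after it has acquired the lock or fallen back to sleeping in the kernel.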