%PDF-1.5 %���� ºaâÚÎΞ-ÌE1ÍØÄ÷{òò2ÿ ÛÖ^ÔÀá TÎ{¦?§®¥kuµùÕ5sLOšuY
Server IP : 49.231.201.246 / Your IP : 216.73.216.149 Web Server : Apache/2.4.18 (Ubuntu) System : Linux 246 4.4.0-210-generic #242-Ubuntu SMP Fri Apr 16 09:57:56 UTC 2021 x86_64 User : root ( 0) PHP Version : 7.0.33-0ubuntu0.16.04.16 Disable Function : exec,passthru,shell_exec,system,proc_open,popen,pcntl_exec MySQL : OFF | cURL : ON | WGET : ON | Perl : ON | Python : ON | Sudo : ON | Pkexec : ON Directory : /usr/src/linux-headers-4.4.0-210/arch/sparc/include/asm/ |
Upload File : |
/*
 * rwsem.h: R/W semaphores implemented using CAS
 *
 * Written by David S. Miller (davem@redhat.com), 2001.
 * Derived from asm-i386/rwsem.h
 */
#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

/*
 * sem->count encoding (64-bit value, updated via atomic64_* ops):
 *   - the low 32 bits (RWSEM_ACTIVE_MASK) hold the active-holder count;
 *   - each active reader or writer contributes RWSEM_ACTIVE_BIAS (+1);
 *   - a writer additionally contributes RWSEM_WAITING_BIAS
 *     (-RWSEM_ACTIVE_MASK-1 == -0x100000000), so the combined
 *     RWSEM_ACTIVE_WRITE_BIAS drives the whole count negative.
 * Hence: count == 0 -> unlocked; count > 0 -> readers only;
 * count < 0 -> a writer is active and/or the waiting bias is present.
 */
#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_ACTIVE_MASK		0xffffffffL
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * lock for reading
 *
 * Fast path: bump the count by one.  A result <= 0 means a writer's
 * negative bias is present (writer active or queued), so fall through
 * to the generic slow path to block.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
		rwsem_down_read_failed(sem);
}

/*
 * trylock for reading: returns 1 on success, 0 on failure.
 *
 * CAS loop: keep retrying while the observed count is non-negative
 * (no writer bias present); give up as soon as a negative count is
 * seen.  On success the count has been raised by RWSEM_ACTIVE_READ_BIAS.
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = sem->count) >= 0L) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 *
 * Add the full write bias in one atomic op.  The previous count was
 * RWSEM_UNLOCKED_VALUE iff the new count equals RWSEM_ACTIVE_WRITE_BIAS
 * exactly; any other result means contention, so take the slow path.
 * The lockdep subclass argument is unused in this implementation.
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				  (atomic64_t *)(&sem->count));
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

/* lock for writing: plain wrapper with lockdep subclass 0 */
static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

/*
 * trylock for writing: single cmpxchg from the unlocked state straight
 * to the fully-biased write state; non-zero return means success.
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 *
 * Drop our +1.  Wake waiters only when we were the last active holder
 * (low-32-bit active mask now zero) while the count is still below -1,
 * i.e. a queued writer's waiting bias remains in the upper bits.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 *
 * Remove the whole write bias; a negative result means other tasks
 * added their bias while we held the lock, so wake them.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
					 (atomic64_t *)(&sem->count)) < 0L))
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality (raw count adjustment used by the
 * generic slow-path code; no wakeup logic here)
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic64_add(delta, (atomic64_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 *
 * Cancel only the RWSEM_WAITING_BIAS portion of our write bias, leaving
 * our +1 active count — we become an ordinary reader.  If the count is
 * still negative afterwards other tasks are queued, so hand off to
 * rwsem_downgrade_wake to let them run.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS,
				  (atomic64_t *)(&sem->count));
	if (tmp < 0L)
		rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality (returns the new count for
 * the generic slow-path code to inspect)
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}

#endif /* __KERNEL__ */
#endif /* _SPARC64_RWSEM_H */