arch_spin_lock
arch_spin_trylock
arch_spin_unlock
arch_write_lock
arch_write_trylock
arch_write_unlock
arch_read_lock
arch_read_unlock
arch_read_trylock
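
These are the low-level spinlock and rwlock primitives from the ARM port of the Linux kernel (arch/arm/include/asm/spinlock.h). The spinlock is a ticket lock, so the code below leans on the arch_spinlock_t layout from spinlock_types.h, plus helpers such as prefetchw(), smp_mb(), ACCESS_ONCE() and the WFE()/dsb_sev() event macros from other ARM headers. A sketch of the layouts the listing assumes (little-endian field order shown, TICKET_SHIFT taken to be 16; check the kernel headers for the authoritative definitions):

/* Sketch of the lock layouts assumed below (little-endian shown). */
#define TICKET_SHIFT	16

typedef struct {
	union {
		u32 slock;			/* the whole lock word */
		struct __raw_tickets {
			u16 owner;		/* ticket currently being served */
			u16 next;		/* next ticket to hand out */
		} tickets;
	};
} arch_spinlock_t;

typedef struct {
	u32 lock;	/* 0: free, N: N readers, bit 31: held by a writer */
} arch_rwlock_t;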
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}
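
The ldrex/strex loop atomically bumps tickets.next (adding 1 << TICKET_SHIFT touches only the upper halfword) while keeping the pre-increment value in lockval; the C loop then parks the CPU in WFE until owner reaches the ticket that was drawn. A plain-C sketch of the same acquire path, written with a GCC atomic builtin instead of ldrex/strex (illustrative only, not the kernel's code):

/* Illustrative C equivalent of arch_spin_lock(), not the kernel code. */
static inline void ticket_lock_sketch(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval;

	/* Atomically take a ticket: fetch the old word, bump tickets.next. */
	lockval.slock = __atomic_fetch_add(&lock->slock, 1 << TICKET_SHIFT,
					   __ATOMIC_RELAXED);

	/* Spin until the owner field catches up with our ticket. */
	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();					/* sleep until a SEV */
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();	/* acquire: keep the critical section after the lock */
}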
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
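
Here "subs %1, %0, %0, ror #16" compares the two halfwords of the lock word: the result is zero only when owner == next, i.e. the lock is free. Only then is next bumped and stored; res is the strex status, so the do/while retries solely on a failed exclusive store. A plain-C sketch of the same idea using a compare-and-swap (illustrative; unlike the asm, it simply reports failure if the word changed underneath it):

/* Illustrative C equivalent of arch_spin_trylock(), not the kernel code. */
static inline int ticket_trylock_sketch(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.slock = ACCESS_ONCE(lock->slock);
	if (old.tickets.next != old.tickets.owner)
		return 0;				/* already held */

	new.slock = old.slock + (1 << TICKET_SHIFT);	/* take a ticket */
	if (!__atomic_compare_exchange_n(&lock->slock, &old.slock, new.slock,
					 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		return 0;				/* lost a race */

	smp_mb();					/* acquire barrier */
	return 1;
}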
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}
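
Unlock needs no ldrex/strex: only the current owner ever writes tickets.owner, so a plain increment is enough. smp_mb() keeps the critical section from leaking past the release, and dsb_sev() wakes any CPU parked in WFE inside arch_spin_lock(). The event helpers are defined elsewhere in the ARM headers; roughly (ARMv7 flavour shown, the exact kernel definitions vary by architecture version):

/* Rough sketch of the event helpers used above (ARMv7-style). */
#define wfe()	__asm__ __volatile__("wfe" : : : "memory")	/* wait for event */
#define sev()	__asm__ __volatile__("sev" : : : "memory")	/* send event */

static inline void dsb_sev(void)
{
	__asm__ __volatile__("dsb ishst" : : : "memory");	/* drain the store */
	sev();						/* wake CPUs waiting in WFE */
}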
/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
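
In other words, the rw->lock word is interpreted as follows (a summary of the comment above, with a couple of hypothetical helper names that do not exist in the kernel):

/*
 * rw->lock encoding assumed by the code below:
 *   0x00000000          unlocked
 *   0x00000001 and up   N readers hold the lock (always "positive")
 *   0x80000000          one writer holds the lock (bit 31, "negative")
 */
#define RW_WRITE_BIT	0x80000000	/* hypothetical name, for illustration */

static inline int rw_write_locked_sketch(u32 v) { return (v & RW_WRITE_BIT) != 0; }
static inline u32 rw_reader_count_sketch(u32 v) { return v & ~RW_WRITE_BIT; }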
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}
/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
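
A plain-C sketch of the steps the comment lists, using a compare-and-swap loop in place of ldrex/strexpl (illustrative only, not the kernel's code):

/* Illustrative C equivalent of arch_read_lock(), not the kernel code. */
static inline void read_lock_sketch(arch_rwlock_t *rw)
{
	u32 old, new;

	for (;;) {
		old = ACCESS_ONCE(rw->lock);
		if (old & 0x80000000) {		/* "negative": a writer owns it */
			wfe();			/* wait for the writer's SEV */
			continue;
		}
		new = old + 1;			/* one more reader */
		if (__atomic_compare_exchange_n(&rw->lock, &old, new, 0,
						__ATOMIC_RELAXED, __ATOMIC_RELAXED))
			break;
	}

	smp_mb();	/* acquire: order the read-side critical section */
}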
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}
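
The loop retries only on a failed exclusive store; tmp ends up holding the decremented reader count, so dsb_sev() fires only when the last reader leaves, which is the only transition a writer parked in WFE is waiting for. As a plain-C sketch (illustrative only, not the kernel's code):

/* Illustrative C equivalent of arch_read_unlock(), not the kernel code. */
static inline void read_unlock_sketch(arch_rwlock_t *rw)
{
	u32 remaining;

	smp_mb();	/* release: keep the critical section before the drop */

	/* Atomically drop our reader; "remaining" mirrors tmp above. */
	remaining = __atomic_sub_fetch(&rw->lock, 1, __ATOMIC_RELAXED);

	if (remaining == 0)
		dsb_sev();	/* last reader out: wake any waiting writer */
}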
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
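
For completeness, a minimal usage sketch. Real kernel code goes through the generic spin_lock()/read_lock()/write_lock() wrappers rather than calling the arch_* primitives directly; the demo_* names below are made up for illustration, and the initializer is assumed to be the __ARCH_SPIN_LOCK_UNLOCKED macro from spinlock_types.h:

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo(void)
{
	arch_spin_lock(&demo_lock);
	/* ... critical section ... */
	arch_spin_unlock(&demo_lock);

	if (arch_spin_trylock(&demo_lock)) {
		/* ... got the lock without spinning ... */
		arch_spin_unlock(&demo_lock);
	}
}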