Locking changes for v6.15:

Merge tag 'locking-core-2025-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 "Locking primitives:
   - Micro-optimize percpu_{,try_}cmpxchg{64,128}_op() and
     {,try_}cmpxchg{64,128} on x86 (Uros Bizjak)
   - mutexes: extend debug checks in mutex_lock() (Yunhui Cui)
   - Misc cleanups (Uros Bizjak)

  Lockdep:
   - Fix might_fault() lockdep check of current->mm->mmap_lock (Peter Zijlstra)
   - Don't disable interrupts on RT in disable_irq_nosync_lockdep.*()
     (Sebastian Andrzej Siewior)
   - Disable KASAN instrumentation of lockdep.c (Waiman Long)
   - Add kasan_check_byte() check in lock_acquire() (Waiman Long)
   - Misc cleanups (Sebastian Andrzej Siewior)

  Rust runtime integration:
   - Use Pin for all LockClassKey usages (Mitchell Levy)
   - sync: Add accessor for the lock behind a given guard (Alice Ryhl)
   - sync: condvar: Add wait_interruptible_freezable() (Alice Ryhl)
   - sync: lock: Add an example for Guard::lock_ref() (Boqun Feng)

  Split-lock detection feature (x86):
   - Fix warning mode with disabled mitigation mode (Maksim Davydov)

  Locking events:
   - Add locking events for rtmutex slow paths (Waiman Long)
   - Add locking events for lockdep (Waiman Long)"

Signed-off-by: Ingo Molnar <mingo@kernel.org>

* tag 'locking-core-2025-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lockdep: Remove disable_irq_lockdep()
  lockdep: Don't disable interrupts on RT in disable_irq_nosync_lockdep.*()
  rust: lockdep: Use Pin for all LockClassKey usages
  rust: sync: condvar: Add wait_interruptible_freezable()
  rust: sync: lock: Add an example for Guard::lock_ref()
  rust: sync: Add accessor for the lock behind a given guard
  locking/lockdep: Add kasan_check_byte() check in lock_acquire()
  locking/lockdep: Disable KASAN instrumentation of lockdep.c
  locking/lock_events: Add locking events for lockdep
  locking/lock_events: Add locking events for rtmutex slow paths
  x86/split_lock: Fix the delayed detection logic
  lockdep/mm: Fix might_fault() lockdep check of current->mm->mmap_lock
  x86/locking: Remove semicolon from "lock" prefix
  locking/mutex: Add MUTEX_WARN_ON() into fast path
  x86/locking: Use asm_inline for {,try_}cmpxchg{64,128} emulations
  x86/locking: Use ALT_OUTPUT_SP() for percpu_{,try_}cmpxchg{64,128}_op()
This commit is contained in commit 23608993bb.
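For context (not part of the patch): much of the x86 diff below replaces the "lock; " spelling of the lock prefix with "lock ". Both spellings assemble to the same encoding; the series standardizes on the prefix without a separating semicolon and, for the cmpxchg emulation wrappers, switches to asm_inline so the compiler treats the asm block as small when making inlining decisions. A minimal out-of-tree sketch of such a locked read-modify-write, with made-up names (MY_LOCK_PREFIX, atomic_add_smp) that are not the kernel's:

/* Build on x86: gcc -O2 -o lock_prefix lock_prefix.c */

/* Stand-in for the kernel's LOCK_PREFIX: "lock " with no semicolon. */
#define MY_LOCK_PREFIX "\n\tlock "

static inline void atomic_add_smp(int *v, int i)
{
	/* "lock addl" and the older "lock; addl" emit the same instruction. */
	asm volatile(MY_LOCK_PREFIX "addl %1, %0"
		     : "+m" (*v)
		     : "ir" (i)
		     : "memory");
}

int main(void)
{
	int counter = 0;

	atomic_add_smp(&counter, 5);
	return counter == 5 ? 0 : 1;	/* exits 0 on success */
}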
@@ -48,7 +48,7 @@
 		".popsection\n"				\
 		"671:"
 
-#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock "
 
 #else /* ! CONFIG_SMP */
 #define LOCK_PREFIX_HERE ""
@@ -12,11 +12,11 @@
  */
 
 #ifdef CONFIG_X86_32
-#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
+#define mb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "mfence", \
 				      X86_FEATURE_XMM2) ::: "memory", "cc")
-#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
+#define rmb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "lfence", \
 				      X86_FEATURE_XMM2) ::: "memory", "cc")
-#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
+#define wmb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "sfence", \
 				      X86_FEATURE_XMM2) ::: "memory", "cc")
 #else
 #define __mb() asm volatile("mfence":::"memory")
@@ -50,7 +50,7 @@
 #define __dma_rmb()	barrier()
 #define __dma_wmb()	barrier()
 
-#define __smp_mb()	asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")
+#define __smp_mb()	asm volatile("lock addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")
 
 #define __smp_rmb()	dma_rmb()
 #define __smp_wmb()	barrier()
@@ -134,7 +134,7 @@ extern void __add_wrong_size(void)
 	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
 
 #define __sync_cmpxchg(ptr, old, new, size)				\
-	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
+	__raw_cmpxchg((ptr), (old), (new), (size), "lock ")
 
 #define __cmpxchg_local(ptr, old, new, size)				\
 	__raw_cmpxchg((ptr), (old), (new), (size), "")
@@ -222,7 +222,7 @@ extern void __add_wrong_size(void)
 	__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
 
 #define __sync_try_cmpxchg(ptr, pold, new, size)			\
-	__raw_try_cmpxchg((ptr), (pold), (new), (size), "lock; ")
+	__raw_try_cmpxchg((ptr), (pold), (new), (size), "lock ")
 
 #define __try_cmpxchg_local(ptr, pold, new, size)			\
 	__raw_try_cmpxchg((ptr), (pold), (new), (size), "")
@@ -91,19 +91,21 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
 	union __u64_halves o = { .full = (_old), },			\
 			   n = { .full = (_new), };			\
 									\
-	asm volatile(ALTERNATIVE(_lock_loc				\
-				 "call cmpxchg8b_emu",			\
-				 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
-		     : ALT_OUTPUT_SP("+a" (o.low), "+d" (o.high))	\
-		     : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr)	\
-		     : "memory");					\
+	asm_inline volatile(						\
+		ALTERNATIVE(_lock_loc					\
+			    "call cmpxchg8b_emu",			\
+			    _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8)	\
+		: ALT_OUTPUT_SP("+a" (o.low), "+d" (o.high))		\
+		: "b" (n.low), "c" (n.high),				\
+		  [ptr] "S" (_ptr)					\
+		: "memory");						\
 									\
 	o.full;								\
 })
 
 static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
 {
-	return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock; ");
+	return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock ");
 }
 #define arch_cmpxchg64 arch_cmpxchg64
 
@@ -119,14 +121,16 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
 			   n = { .full = (_new), };			\
 	bool ret;							\
 									\
-	asm volatile(ALTERNATIVE(_lock_loc				\
-				 "call cmpxchg8b_emu",			\
-				 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
-		     CC_SET(e)						\
-		     : ALT_OUTPUT_SP(CC_OUT(e) (ret),			\
-				     "+a" (o.low), "+d" (o.high))	\
-		     : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr)	\
-		     : "memory");					\
+	asm_inline volatile(						\
+		ALTERNATIVE(_lock_loc					\
+			    "call cmpxchg8b_emu",			\
+			    _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8)	\
+		CC_SET(e)						\
+		: ALT_OUTPUT_SP(CC_OUT(e) (ret),			\
+				"+a" (o.low), "+d" (o.high))		\
+		: "b" (n.low), "c" (n.high),				\
+		  [ptr] "S" (_ptr)					\
+		: "memory");						\
 									\
 	if (unlikely(!ret))						\
 		*(_oldp) = o.full;					\
@@ -136,7 +140,7 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
 
 static __always_inline bool arch_try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
 {
-	return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock; ");
+	return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock ");
 }
 #define arch_try_cmpxchg64 arch_try_cmpxchg64
 
@@ -13,7 +13,7 @@ static inline void edac_atomic_scrub(void *va, u32 size)
 	 * are interrupt, DMA and SMP safe.
 	 */
 	for (i = 0; i < size / 4; i++, virt_addr++)
-		asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
+		asm volatile("lock addl $0, %0"::"m" (*virt_addr));
 }
 
 #endif /* _ASM_X86_EDAC_H */
@@ -348,15 +348,14 @@ do {									\
 	old__.var = _oval;						\
 	new__.var = _nval;						\
 									\
-	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
-			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
-		  : [var] "+m" (__my_cpu_var(_var)),			\
-		    "+a" (old__.low),					\
-		    "+d" (old__.high)					\
-		  : "b" (new__.low),					\
-		    "c" (new__.high),					\
-		    "S" (&(_var))					\
-		  : "memory");						\
+	asm_inline qual (						\
+		ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
+			    "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
+		: ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)),	\
+				"+a" (old__.low), "+d" (old__.high))	\
+		: "b" (new__.low), "c" (new__.high),			\
+		  "S" (&(_var))						\
+		: "memory");						\
 									\
 	old__.var;							\
 })
@@ -378,17 +377,16 @@ do {									\
 	old__.var = *_oval;						\
 	new__.var = _nval;						\
 									\
-	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
-			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
-		  CC_SET(z)						\
-		  : CC_OUT(z) (success),				\
-		    [var] "+m" (__my_cpu_var(_var)),			\
-		    "+a" (old__.low),					\
-		    "+d" (old__.high)					\
-		  : "b" (new__.low),					\
-		    "c" (new__.high),					\
-		    "S" (&(_var))					\
-		  : "memory");						\
+	asm_inline qual (						\
+		ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
+			    "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
+		CC_SET(z)						\
+		: ALT_OUTPUT_SP(CC_OUT(z) (success),			\
+				[var] "+m" (__my_cpu_var(_var)),	\
+				"+a" (old__.low), "+d" (old__.high))	\
+		: "b" (new__.low), "c" (new__.high),			\
+		  "S" (&(_var))						\
+		: "memory");						\
 	if (unlikely(!success))						\
 		*_oval = old__.var;					\
 									\
@@ -419,15 +417,14 @@ do {									\
 	old__.var = _oval;						\
 	new__.var = _nval;						\
 									\
-	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
-			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
-		  : [var] "+m" (__my_cpu_var(_var)),			\
-		    "+a" (old__.low),					\
-		    "+d" (old__.high)					\
-		  : "b" (new__.low),					\
-		    "c" (new__.high),					\
-		    "S" (&(_var))					\
-		  : "memory");						\
+	asm_inline qual (						\
+		ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
+			    "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
+		: ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)),	\
+				"+a" (old__.low), "+d" (old__.high))	\
+		: "b" (new__.low), "c" (new__.high),			\
+		  "S" (&(_var))						\
+		: "memory");						\
 									\
 	old__.var;							\
 })
@@ -449,19 +446,19 @@ do {									\
 	old__.var = *_oval;						\
 	new__.var = _nval;						\
 									\
-	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
-			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
-		  CC_SET(z)						\
-		  : CC_OUT(z) (success),				\
-		    [var] "+m" (__my_cpu_var(_var)),			\
-		    "+a" (old__.low),					\
-		    "+d" (old__.high)					\
-		  : "b" (new__.low),					\
-		    "c" (new__.high),					\
-		    "S" (&(_var))					\
-		  : "memory");						\
+	asm_inline qual (						\
+		ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
+			    "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
+		CC_SET(z)						\
+		: ALT_OUTPUT_SP(CC_OUT(z) (success),			\
+				[var] "+m" (__my_cpu_var(_var)),	\
+				"+a" (old__.low), "+d" (old__.high))	\
+		: "b" (new__.low), "c" (new__.high),			\
+		  "S" (&(_var))						\
+		: "memory");						\
 	if (unlikely(!success))						\
 		*_oval = old__.var;					\
 									\
 	likely(success);						\
 })
@@ -31,7 +31,7 @@
  */
 static inline void sync_set_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; " __ASM_SIZE(bts) " %1,%0"
+	asm volatile("lock " __ASM_SIZE(bts) " %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -49,7 +49,7 @@ static inline void sync_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; " __ASM_SIZE(btr) " %1,%0"
+	asm volatile("lock " __ASM_SIZE(btr) " %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -66,7 +66,7 @@ static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline void sync_change_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; " __ASM_SIZE(btc) " %1,%0"
+	asm volatile("lock " __ASM_SIZE(btc) " %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -82,7 +82,7 @@ static inline void sync_change_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr);
+	return GEN_BINARY_RMWcc("lock " __ASM_SIZE(bts), *addr, c, "Ir", nr);
 }
 
 /**
@@ -95,7 +95,7 @@ static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr);
+	return GEN_BINARY_RMWcc("lock " __ASM_SIZE(btr), *addr, c, "Ir", nr);
 }
 
 /**
@@ -108,7 +108,7 @@ static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-	return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr);
+	return GEN_BINARY_RMWcc("lock " __ASM_SIZE(btc), *addr, c, "Ir", nr);
 }
 
 #define sync_test_bit(nr, addr) test_bit(nr, addr)
@@ -192,7 +192,13 @@ static void __split_lock_reenable(struct work_struct *work)
 {
 	sld_update_msr(true);
 }
-static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);
+
+/*
+ * In order for each CPU to schedule its delayed work independently of the
+ * others, delayed work struct must be per-CPU. This is not required when
+ * sysctl_sld_mitigate is enabled because of the semaphore that limits
+ * the number of simultaneously scheduled delayed works to 1.
+ */
+static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
 
 /*
  * If a CPU goes offline with pending delayed work to re-enable split lock
@@ -213,7 +219,7 @@ static int splitlock_cpu_offline(unsigned int cpu)
 
 static void split_lock_warn(unsigned long ip)
 {
-	struct delayed_work *work;
+	struct delayed_work *work = NULL;
 	int cpu;
 
 	if (!current->reported_split_lock)
@@ -235,11 +241,17 @@ static void split_lock_warn(unsigned long ip)
 		if (down_interruptible(&buslock_sem) == -EINTR)
 			return;
 		work = &sl_reenable_unlock;
-	} else {
-		work = &sl_reenable;
 	}
 
 	cpu = get_cpu();
+
+	if (!work) {
+		work = this_cpu_ptr(&sl_reenable);
+		/* Deferred initialization of per-CPU struct */
+		if (!work->work.func)
+			INIT_DELAYED_WORK(work, __split_lock_reenable);
+	}
+
 	schedule_delayed_work_on(cpu, work, 2);
 
 	/* Disable split lock detection on this CPU to make progress */
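For context (not part of the patch): the two hunks above replace a single shared delayed work with a per-CPU instance that is initialized lazily the first time a given CPU needs it. A hedged, self-contained sketch of the same pattern, using illustrative names (my_reenable, my_work_fn, my_defer_on_this_cpu) rather than the kernel's:

#include <linux/percpu.h>
#include <linux/workqueue.h>

/* One delayed work per CPU instead of a single shared instance. */
static DEFINE_PER_CPU(struct delayed_work, my_reenable);

static void my_work_fn(struct work_struct *work)
{
	/* Re-enable whatever was temporarily turned off on this CPU. */
}

static void my_defer_on_this_cpu(void)
{
	struct delayed_work *work;
	int cpu = get_cpu();

	work = this_cpu_ptr(&my_reenable);
	/* Deferred initialization: work->work.func stays NULL until first use. */
	if (!work->work.func)
		INIT_DELAYED_WORK(work, my_work_fn);

	/* Each CPU queues its own work without clobbering another CPU's. */
	schedule_delayed_work_on(cpu, work, 2);
	put_cpu();
}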
@@ -448,7 +448,7 @@ irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
 static inline void disable_irq_nosync_lockdep(unsigned int irq)
 {
 	disable_irq_nosync(irq);
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
 	local_irq_disable();
 #endif
 }
@@ -456,22 +456,14 @@ static inline void disable_irq_nosync_lockdep(unsigned int irq)
 static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
 {
 	disable_irq_nosync(irq);
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
 	local_irq_save(*flags);
 #endif
 }
 
-static inline void disable_irq_lockdep(unsigned int irq)
-{
-	disable_irq(irq);
-#ifdef CONFIG_LOCKDEP
-	local_irq_disable();
-#endif
-}
-
 static inline void enable_irq_lockdep(unsigned int irq)
 {
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
 	local_irq_enable();
 #endif
 	enable_irq(irq);
@@ -479,7 +471,7 @@ static inline void enable_irq_lockdep(unsigned int irq)
 
 static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
 {
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
 	local_irq_restore(*flags);
 #endif
 	enable_irq(irq);
@@ -5,7 +5,8 @@ KCOV_INSTRUMENT		:= n
 
 obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
 
-# Avoid recursion lockdep -> sanitizer -> ... -> lockdep.
+# Avoid recursion lockdep -> sanitizer -> ... -> lockdep & improve performance.
+KASAN_SANITIZE_lockdep.o := n
 KCSAN_SANITIZE_lockdep.o := n
 
 ifdef CONFIG_FUNCTION_TRACER
@@ -67,3 +67,31 @@ LOCK_EVENT(rwsem_rlock_handoff)	/* # of read lock handoffs		*/
 LOCK_EVENT(rwsem_wlock)		/* # of write locks acquired		*/
 LOCK_EVENT(rwsem_wlock_fail)	/* # of failed write lock acquisitions	*/
 LOCK_EVENT(rwsem_wlock_handoff)	/* # of write lock handoffs		*/
+
+/*
+ * Locking events for rtlock_slowlock()
+ */
+LOCK_EVENT(rtlock_slowlock)	/* # of rtlock_slowlock() calls		*/
+LOCK_EVENT(rtlock_slow_acq1)	/* # of locks acquired after wait_lock	*/
+LOCK_EVENT(rtlock_slow_acq2)	/* # of locks acquired in for loop	*/
+LOCK_EVENT(rtlock_slow_sleep)	/* # of sleeps				*/
+LOCK_EVENT(rtlock_slow_wake)	/* # of wakeup's			*/
+
+/*
+ * Locking events for rt_mutex_slowlock()
+ */
+LOCK_EVENT(rtmutex_slowlock)	/* # of rt_mutex_slowlock() calls	*/
+LOCK_EVENT(rtmutex_slow_block)	/* # of rt_mutex_slowlock_block() calls	*/
+LOCK_EVENT(rtmutex_slow_acq1)	/* # of locks acquired after wait_lock	*/
+LOCK_EVENT(rtmutex_slow_acq2)	/* # of locks acquired at the end	*/
+LOCK_EVENT(rtmutex_slow_acq3)	/* # of locks acquired in *block()	*/
+LOCK_EVENT(rtmutex_slow_sleep)	/* # of sleeps				*/
+LOCK_EVENT(rtmutex_slow_wake)	/* # of wakeup's			*/
+LOCK_EVENT(rtmutex_deadlock)	/* # of rt_mutex_handle_deadlock()'s	*/
+
+/*
+ * Locking events for lockdep
+ */
+LOCK_EVENT(lockdep_acquire)
+LOCK_EVENT(lockdep_lock)
+LOCK_EVENT(lockdep_nocheck)
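For context (not part of the patch): the LOCK_EVENT() list above is consumed several times with different definitions of the macro, so each new entry gains an event id, a counter and a printable name without further boilerplate, and the slow-path code only has to call lockevent_inc(). A simplified, standalone userspace sketch of that X-macro pattern (not the kernel's actual implementation):

#include <stdio.h>

#define LOCK_EVENTS_LIST		\
	LOCK_EVENT(lockdep_acquire)	\
	LOCK_EVENT(lockdep_lock)	\
	LOCK_EVENT(lockdep_nocheck)

enum lock_event_id {
#define LOCK_EVENT(name) LOCKEVENT_##name,
	LOCK_EVENTS_LIST
#undef LOCK_EVENT
	LOCKEVENT_NUM
};

static const char * const lockevent_names[] = {
#define LOCK_EVENT(name) #name,
	LOCK_EVENTS_LIST
#undef LOCK_EVENT
};

static unsigned long lockevents[LOCKEVENT_NUM];

/* The kernel bumps a per-CPU counter; a plain array suffices here. */
#define lockevent_inc(evt) (lockevents[LOCKEVENT_##evt]++)

int main(void)
{
	lockevent_inc(lockdep_acquire);
	lockevent_inc(lockdep_acquire);
	lockevent_inc(lockdep_nocheck);

	for (int i = 0; i < LOCKEVENT_NUM; i++)
		printf("%s: %lu\n", lockevent_names[i], lockevents[i]);
	return 0;
}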
@@ -57,10 +57,12 @@
 #include <linux/lockdep.h>
 #include <linux/context_tracking.h>
 #include <linux/console.h>
+#include <linux/kasan.h>
 
 #include <asm/sections.h>
 
 #include "lockdep_internals.h"
+#include "lock_events.h"
 
 #include <trace/events/lock.h>
 
@@ -170,6 +172,7 @@ static struct task_struct *lockdep_selftest_task_struct;
 static int graph_lock(void)
 {
 	lockdep_lock();
+	lockevent_inc(lockdep_lock);
 	/*
 	 * Make sure that if another CPU detected a bug while
 	 * walking the graph we dont change it (while the other
@@ -5091,8 +5094,12 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (unlikely(lock->key == &__lockdep_no_track__))
 		return 0;
 
-	if (!prove_locking || lock->key == &__lockdep_no_validate__)
+	lockevent_inc(lockdep_acquire);
+
+	if (!prove_locking || lock->key == &__lockdep_no_validate__) {
 		check = 0;
+		lockevent_inc(lockdep_nocheck);
+	}
 
 	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
 		class = lock->class_cache[subclass];
@@ -5824,6 +5831,14 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (!debug_locks)
 		return;
 
+	/*
+	 * As KASAN instrumentation is disabled and lock_acquire() is usually
+	 * the first lockdep call when a task tries to acquire a lock, add
+	 * kasan_check_byte() here to check for use-after-free and other
+	 * memory errors.
+	 */
+	kasan_check_byte(lock);
+
 	if (unlikely(!lockdep_enabled())) {
 		/* XXX allow trylock from NMI ?!? */
 		if (lockdep_nmi() && !trylock) {
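For context (not part of the patch): with lockdep.c itself excluded from KASAN instrumentation, the explicit kasan_check_byte() call above keeps use-after-free detection on the lock object at the point where lockdep first touches it. A hedged kernel-style sketch of the same idea, with illustrative names (struct my_obj, my_locked_inc) that are not from the patch:

#include <linux/kasan.h>
#include <linux/spinlock.h>

struct my_obj {
	spinlock_t lock;
	int value;
};

static void my_locked_inc(struct my_obj *obj)
{
	/*
	 * Even if this file is compiled without KASAN instrumentation, an
	 * explicit check still reports a bad (e.g. freed) obj pointer
	 * before the lock word is touched.
	 */
	kasan_check_byte(obj);

	spin_lock(&obj->lock);
	obj->value++;
	spin_unlock(&obj->lock);
}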
@@ -143,6 +143,8 @@ static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
 	unsigned long curr = (unsigned long)current;
 	unsigned long zero = 0UL;
 
+	MUTEX_WARN_ON(lock->magic != lock);
+
 	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
 		return true;
 
@@ -27,6 +27,7 @@
 #include <trace/events/lock.h>
 
 #include "rtmutex_common.h"
+#include "lock_events.h"
 
 #ifndef WW_RT
 # define build_ww_mutex()	(false)
@@ -1612,10 +1613,13 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 	struct task_struct *owner;
 	int ret = 0;
 
+	lockevent_inc(rtmutex_slow_block);
 	for (;;) {
 		/* Try to acquire the lock: */
-		if (try_to_take_rt_mutex(lock, current, waiter))
+		if (try_to_take_rt_mutex(lock, current, waiter)) {
+			lockevent_inc(rtmutex_slow_acq3);
 			break;
+		}
 
 		if (timeout && !timeout->task) {
 			ret = -ETIMEDOUT;
@@ -1638,8 +1642,10 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 			owner = NULL;
 		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
-		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
+		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) {
+			lockevent_inc(rtmutex_slow_sleep);
 			rt_mutex_schedule();
+		}
 
 		raw_spin_lock_irq(&lock->wait_lock);
 		set_current_state(state);
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&lock->wait_lock);
|
||||
lockevent_inc(rtmutex_slowlock);
|
||||
|
||||
/* Try to acquire the lock again: */
|
||||
if (try_to_take_rt_mutex(lock, current, NULL)) {
|
||||
@ -1701,6 +1708,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
|
||||
__ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
|
||||
ww_mutex_lock_acquired(ww, ww_ctx);
|
||||
}
|
||||
lockevent_inc(rtmutex_slow_acq1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1719,10 +1727,12 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 			__ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
 			ww_mutex_lock_acquired(ww, ww_ctx);
 		}
+		lockevent_inc(rtmutex_slow_acq2);
 	} else {
 		__set_current_state(TASK_RUNNING);
 		remove_waiter(lock, waiter);
 		rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
+		lockevent_inc(rtmutex_deadlock);
 	}
 
 	/*
@@ -1751,6 +1761,7 @@ static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
 				  &waiter, wake_q);
 
 	debug_rt_mutex_free_waiter(&waiter);
+	lockevent_cond_inc(rtmutex_slow_wake, !wake_q_empty(wake_q));
 	return ret;
 }
 
@@ -1823,9 +1834,12 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 	struct task_struct *owner;
 
 	lockdep_assert_held(&lock->wait_lock);
+	lockevent_inc(rtlock_slowlock);
 
-	if (try_to_take_rt_mutex(lock, current, NULL))
+	if (try_to_take_rt_mutex(lock, current, NULL)) {
+		lockevent_inc(rtlock_slow_acq1);
 		return;
+	}
 
 	rt_mutex_init_rtlock_waiter(&waiter);
 
@@ -1838,8 +1852,10 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 
 	for (;;) {
 		/* Try to acquire the lock again */
-		if (try_to_take_rt_mutex(lock, current, &waiter))
+		if (try_to_take_rt_mutex(lock, current, &waiter)) {
+			lockevent_inc(rtlock_slow_acq2);
 			break;
+		}
 
 		if (&waiter == rt_mutex_top_waiter(lock))
 			owner = rt_mutex_owner(lock);
@@ -1847,8 +1863,10 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 			owner = NULL;
 		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
-		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
+		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) {
+			lockevent_inc(rtlock_slow_sleep);
 			schedule_rtlock();
+		}
 
 		raw_spin_lock_irq(&lock->wait_lock);
 		set_current_state(TASK_RTLOCK_WAIT);
@@ -1865,6 +1883,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 	debug_rt_mutex_free_waiter(&waiter);
 
 	trace_contention_end(lock, 0);
+	lockevent_cond_inc(rtlock_slow_wake, !wake_q_empty(wake_q));
 }
 
 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
@@ -6834,10 +6834,8 @@ void __might_fault(const char *file, int line)
 	if (pagefault_disabled())
 		return;
 	__might_sleep(file, line);
-#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
 	if (current->mm)
 		might_lock_read(&current->mm->mmap_lock);
-#endif
 }
 EXPORT_SYMBOL(__might_fault);
 #endif
@@ -31,6 +31,7 @@
 #include "signal.c"
 #include "slab.c"
 #include "spinlock.c"
+#include "sync.c"
 #include "task.c"
 #include "uaccess.c"
 #include "vmalloc.c"
rust/helpers/sync.c (new file, 13 lines):
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/lockdep.h>
+
+void rust_helper_lockdep_register_key(struct lock_class_key *k)
+{
+	lockdep_register_key(k);
+}
+
+void rust_helper_lockdep_unregister_key(struct lock_class_key *k)
+{
+	lockdep_unregister_key(k);
+}
@@ -5,6 +5,8 @@
 //! This module contains the kernel APIs related to synchronisation that have been ported or
 //! wrapped for usage by Rust code in the kernel.
 
+use crate::pin_init;
+use crate::prelude::*;
 use crate::types::Opaque;
 
 mod arc;
@@ -23,15 +25,64 @@ pub use locked_by::LockedBy;
 
 /// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
 #[repr(transparent)]
-pub struct LockClassKey(Opaque<bindings::lock_class_key>);
+#[pin_data(PinnedDrop)]
+pub struct LockClassKey {
+    #[pin]
+    inner: Opaque<bindings::lock_class_key>,
+}
 
 // SAFETY: `bindings::lock_class_key` is designed to be used concurrently from multiple threads and
 // provides its own synchronization.
 unsafe impl Sync for LockClassKey {}
 
 impl LockClassKey {
+    /// Initializes a dynamically allocated lock class key. In the common case of using a
+    /// statically allocated lock class key, the static_lock_class! macro should be used instead.
+    ///
+    /// # Example
+    /// ```
+    /// # use kernel::{c_str, stack_pin_init};
+    /// # use kernel::alloc::KBox;
+    /// # use kernel::types::ForeignOwnable;
+    /// # use kernel::sync::{LockClassKey, SpinLock};
+    ///
+    /// let key = KBox::pin_init(LockClassKey::new_dynamic(), GFP_KERNEL)?;
+    /// let key_ptr = key.into_foreign();
+    ///
+    /// {
+    ///     stack_pin_init!(let num: SpinLock<u32> = SpinLock::new(
+    ///         0,
+    ///         c_str!("my_spinlock"),
+    ///         // SAFETY: `key_ptr` is returned by the above `into_foreign()`, whose
+    ///         // `from_foreign()` has not yet been called.
+    ///         unsafe { <Pin<KBox<LockClassKey>> as ForeignOwnable>::borrow(key_ptr) }
+    ///     ));
+    /// }
+    ///
+    /// // SAFETY: We dropped `num`, the only use of the key, so the result of the previous
+    /// // `borrow` has also been dropped. Thus, it's safe to use from_foreign.
+    /// unsafe { drop(<Pin<KBox<LockClassKey>> as ForeignOwnable>::from_foreign(key_ptr)) };
+    ///
+    /// # Ok::<(), Error>(())
+    /// ```
+    pub fn new_dynamic() -> impl PinInit<Self> {
+        pin_init!(Self {
+            // SAFETY: lockdep_register_key expects an uninitialized block of memory
+            inner <- Opaque::ffi_init(|slot| unsafe { bindings::lockdep_register_key(slot) })
+        })
+    }
+
     pub(crate) fn as_ptr(&self) -> *mut bindings::lock_class_key {
-        self.0.get()
+        self.inner.get()
     }
 }
 
+#[pinned_drop]
+impl PinnedDrop for LockClassKey {
+    fn drop(self: Pin<&mut Self>) {
+        // SAFETY: self.as_ptr was registered with lockdep and self is pinned, so the address
+        // hasn't changed. Thus, it's safe to pass to unregister.
+        unsafe { bindings::lockdep_unregister_key(self.as_ptr()) }
+    }
+}
+
@@ -44,7 +95,7 @@ macro_rules! static_lock_class {
             // SAFETY: lockdep expects uninitialized memory when it's handed a statically allocated
             // lock_class_key
             unsafe { ::core::mem::MaybeUninit::uninit().assume_init() };
-        &CLASS
+        $crate::prelude::Pin::static_ref(&CLASS)
     }};
 }
 
@@ -11,12 +11,13 @@ use crate::{
     init::PinInit,
     pin_init,
     str::CStr,
-    task::{MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE, TASK_NORMAL, TASK_UNINTERRUPTIBLE},
+    task::{
+        MAX_SCHEDULE_TIMEOUT, TASK_FREEZABLE, TASK_INTERRUPTIBLE, TASK_NORMAL, TASK_UNINTERRUPTIBLE,
+    },
     time::Jiffies,
     types::Opaque,
 };
-use core::marker::PhantomPinned;
-use core::ptr;
+use core::{marker::PhantomPinned, pin::Pin, ptr};
 use macros::pin_data;
 
 /// Creates a [`CondVar`] initialiser with the given name and a newly-created lock class.
@@ -101,7 +102,7 @@ unsafe impl Sync for CondVar {}
 
 impl CondVar {
     /// Constructs a new condvar initialiser.
-    pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+    pub fn new(name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> {
         pin_init!(Self {
             _pin: PhantomPinned,
             // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
@@ -159,6 +160,25 @@ impl CondVar {
         crate::current!().signal_pending()
     }
 
+    /// Releases the lock and waits for a notification in interruptible and freezable mode.
+    ///
+    /// The process is allowed to be frozen during this sleep. No lock should be held when calling
+    /// this function, and there is a lockdep assertion for this. Freezing a task that holds a lock
+    /// can trivially deadlock vs another task that needs that lock to complete before it too can
+    /// hit freezable.
+    #[must_use = "wait_interruptible_freezable returns if a signal is pending, so the caller must check the return value"]
+    pub fn wait_interruptible_freezable<T: ?Sized, B: Backend>(
+        &self,
+        guard: &mut Guard<'_, T, B>,
+    ) -> bool {
+        self.wait_internal(
+            TASK_INTERRUPTIBLE | TASK_FREEZABLE,
+            guard,
+            MAX_SCHEDULE_TIMEOUT,
+        );
+        crate::current!().signal_pending()
+    }
+
     /// Releases the lock and waits for a notification in interruptible mode.
     ///
     /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
@@ -12,7 +12,7 @@ use crate::{
     str::CStr,
     types::{NotThreadSafe, Opaque, ScopeGuard},
 };
-use core::{cell::UnsafeCell, marker::PhantomPinned};
+use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
 use macros::pin_data;
 
 pub mod mutex;
@@ -129,7 +129,7 @@ unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
 
 impl<T, B: Backend> Lock<T, B> {
     /// Constructs a new lock initialiser.
-    pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+    pub fn new(t: T, name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> {
         pin_init!(Self {
             data: UnsafeCell::new(t),
             _pin: PhantomPinned,
@@ -199,7 +199,36 @@ pub struct Guard<'a, T: ?Sized, B: Backend> {
 // SAFETY: `Guard` is sync when the data protected by the lock is also sync.
 unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
 
-impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
+impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
+    /// Returns the lock that this guard originates from.
+    ///
+    /// # Examples
+    ///
+    /// The following example shows how to use [`Guard::lock_ref()`] to assert the corresponding
+    /// lock is held.
+    ///
+    /// ```
+    /// # use kernel::{new_spinlock, stack_pin_init, sync::lock::{Backend, Guard, Lock}};
+    ///
+    /// fn assert_held<T, B: Backend>(guard: &Guard<'_, T, B>, lock: &Lock<T, B>) {
+    ///     // Address-equal means the same lock.
+    ///     assert!(core::ptr::eq(guard.lock_ref(), lock));
+    /// }
+    ///
+    /// // Creates a new lock on the stack.
+    /// stack_pin_init!{
+    ///     let l = new_spinlock!(42)
+    /// }
+    ///
+    /// let g = l.lock();
+    ///
+    /// // `g` originates from `l`.
+    /// assert_held(&g, &l);
+    /// ```
+    pub fn lock_ref(&self) -> &'a Lock<T, B> {
+        self.lock
+    }
+
     pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
         // SAFETY: The caller owns the lock, so it is safe to unlock it.
         unsafe { B::unlock(self.lock.state.get(), &self.state) };
@@ -13,6 +13,7 @@ use crate::{
 use core::{
     cell::UnsafeCell,
     marker::{PhantomData, PhantomPinned},
+    pin::Pin,
 };
 
 /// Trait implemented for marker types for global locks.
@@ -26,7 +27,7 @@ pub trait GlobalLockBackend {
     /// The backend used for this global lock.
     type Backend: Backend + 'static;
     /// The class for this global lock.
-    fn get_lock_class() -> &'static LockClassKey;
+    fn get_lock_class() -> Pin<&'static LockClassKey>;
 }
 
 /// Type used for global locks.
@@ -270,7 +271,7 @@ macro_rules! global_lock {
             type Item = $valuety;
             type Backend = $crate::global_lock_inner!(backend $kind);
 
-            fn get_lock_class() -> &'static $crate::sync::LockClassKey {
+            fn get_lock_class() -> Pin<&'static $crate::sync::LockClassKey> {
                 $crate::static_lock_class!()
             }
         }
@@ -89,7 +89,7 @@ pub struct PollCondVar {
 
 impl PollCondVar {
     /// Constructs a new condvar initialiser.
-    pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
+    pub fn new(name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> {
         pin_init!(Self {
             inner <- CondVar::new(name, key),
         })
@@ -23,6 +23,8 @@ pub const MAX_SCHEDULE_TIMEOUT: c_long = c_long::MAX;
 pub const TASK_INTERRUPTIBLE: c_int = bindings::TASK_INTERRUPTIBLE as c_int;
 /// Bitmask for tasks that are sleeping in an uninterruptible state.
 pub const TASK_UNINTERRUPTIBLE: c_int = bindings::TASK_UNINTERRUPTIBLE as c_int;
+/// Bitmask for tasks that are sleeping in a freezable state.
+pub const TASK_FREEZABLE: c_int = bindings::TASK_FREEZABLE as c_int;
 /// Convenience constant for waking up tasks regardless of whether they are in interruptible or
 /// uninterruptible sleep.
 pub const TASK_NORMAL: c_uint = bindings::TASK_NORMAL as c_uint;
@@ -369,7 +369,7 @@ unsafe impl<T: ?Sized, const ID: u64> Sync for Work<T, ID> {}
 impl<T: ?Sized, const ID: u64> Work<T, ID> {
     /// Creates a new instance of [`Work`].
     #[inline]
-    pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self>
+    pub fn new(name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self>
     where
         T: WorkItem<ID>,
     {