sync.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #ifndef _HARDWARE_SYNC_H
8 #define _HARDWARE_SYNC_H
9 
#include "pico.h"
#include "hardware/address_mapped.h"
#include "hardware/regs/sio.h"
13 
14 #ifdef __cplusplus
15 extern "C" {
16 #endif
17 
// PICO_CONFIG: PARAM_ASSERTIONS_ENABLED_SYNC, Enable/disable assertions in the HW sync module, type=bool, default=0, group=hardware_sync
#ifndef PARAM_ASSERTIONS_ENABLED_SYNC
#define PARAM_ASSERTIONS_ENABLED_SYNC 0
#endif

/*! \brief A spin lock identifier
 *  \ingroup hardware_sync
 *
 * Points at one of the SIO hardware spinlock registers; reads/writes of the
 * register implement try-acquire/release, hence the volatile qualifier.
 */
typedef volatile uint32_t spin_lock_t;

// PICO_CONFIG: PICO_SPINLOCK_ID_IRQ, Spinlock ID for IRQ protection, min=0, max=31, default=9, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_IRQ
#define PICO_SPINLOCK_ID_IRQ 9
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_TIMER, Spinlock ID for Timer protection, min=0, max=31, default=10, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_TIMER
#define PICO_SPINLOCK_ID_TIMER 10
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_HARDWARE_CLAIM, Spinlock ID for Hardware claim protection, min=0, max=31, default=11, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_HARDWARE_CLAIM
#define PICO_SPINLOCK_ID_HARDWARE_CLAIM 11
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_RAND, Spinlock ID for Random Number Generator, min=0, max=31, default=12, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_RAND
#define PICO_SPINLOCK_ID_RAND 12
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_OS1, First Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=14, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_OS1
#define PICO_SPINLOCK_ID_OS1 14
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_OS2, Second Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=15, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_OS2
#define PICO_SPINLOCK_ID_OS2 15
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_FIRST, Lowest Spinlock ID in the 'striped' range, min=0, max=31, default=16, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_FIRST
#define PICO_SPINLOCK_ID_STRIPED_FIRST 16
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_LAST, Highest Spinlock ID in the 'striped' range, min=0, max=31, default=23, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_LAST
#define PICO_SPINLOCK_ID_STRIPED_LAST 23
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_FIRST, Lowest Spinlock ID in the 'claim free' range, min=0, max=31, default=24, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_FIRST
#define PICO_SPINLOCK_ID_CLAIM_FREE_FIRST 24
#endif

// Backwards-compatibility check: this macro was renamed.
#ifdef PICO_SPINLOCK_ID_CLAIM_FREE_END
#warning PICO_SPINLOCK_ID_CLAIM_FREE_END has been renamed to PICO_SPINLOCK_ID_CLAIM_FREE_LAST
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_LAST, Highest Spinlock ID in the 'claim free' range, min=0, max=31, default=31, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_LAST
#define PICO_SPINLOCK_ID_CLAIM_FREE_LAST 31
#endif
111 
117 __force_inline static void __sev(void) {
118  __asm volatile ("sev");
119 }
120 
127 __force_inline static void __wfe(void) {
128  __asm volatile ("wfe");
129 }
130 
136 __force_inline static void __wfi(void) {
137  __asm volatile ("wfi");
138 }
139 
146 __force_inline static void __dmb(void) {
147  __asm volatile ("dmb" : : : "memory");
148 }
149 
157 __force_inline static void __dsb(void) {
158  __asm volatile ("dsb" : : : "memory");
159 }
160 
168 __force_inline static void __isb(void) {
169  __asm volatile ("isb");
170 }
171 
176  // the original code below makes it hard for us to be included from C++ via a header
177  // which itself is in an extern "C", so just use __dmb instead, which is what
178  // is required on Cortex M0+
179  __dmb();
180 //#ifndef __cplusplus
181 // atomic_thread_fence(memory_order_acquire);
182 //#else
183 // std::atomic_thread_fence(std::memory_order_acquire);
184 //#endif
185 }
186 
192  // the original code below makes it hard for us to be included from C++ via a header
193  // which itself is in an extern "C", so just use __dmb instead, which is what
194  // is required on Cortex M0+
195  __dmb();
196 //#ifndef __cplusplus
197 // atomic_thread_fence(memory_order_release);
198 //#else
199 // std::atomic_thread_fence(std::memory_order_release);
200 //#endif
201 }
202 
209  uint32_t status;
210  __asm volatile ("mrs %0, PRIMASK" : "=r" (status)::);
211  __asm volatile ("cpsid i");
212  return status;
213 }
214 
220 __force_inline static void restore_interrupts(uint32_t status) {
221  __asm volatile ("msr PRIMASK,%0"::"r" (status) : );
222 }
223 
231  invalid_params_if(SYNC, lock_num >= NUM_SPIN_LOCKS);
232  return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
233 }
234 
242  invalid_params_if(SYNC, (uint) lock < SIO_BASE + SIO_SPINLOCK0_OFFSET ||
243  (uint) lock >= NUM_SPIN_LOCKS * sizeof(spin_lock_t) + SIO_BASE + SIO_SPINLOCK0_OFFSET ||
244  ((uint) lock - SIO_BASE + SIO_SPINLOCK0_OFFSET) % sizeof(spin_lock_t) != 0);
245  return (uint) (lock - (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET));
246 }
247 
254  // Note we don't do a wfe or anything, because by convention these spin_locks are VERY SHORT LIVED and NEVER BLOCK and run
255  // with INTERRUPTS disabled (to ensure that)... therefore nothing on our core could be blocking us, so we just need to wait on another core
256  // anyway which should be finished soon
257  while (__builtin_expect(!*lock, 0));
259 }
260 
268  *lock = 0;
269 }
270 
280  uint32_t save = save_and_disable_interrupts();
282  return save;
283 }
284 
290 inline static bool is_spin_locked(spin_lock_t *lock) {
291  check_hw_size(spin_lock_t, 4);
292  uint lock_num = spin_lock_get_num(lock);
293  return 0 != (*(io_ro_32 *) (SIO_BASE + SIO_SPINLOCK_ST_OFFSET) & (1u << lock_num));
294 }
295 
307 __force_inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
308  spin_unlock_unsafe(lock);
309  restore_interrupts(saved_irq);
310 }
311 
320 spin_lock_t *spin_lock_init(uint lock_num);
321 
325 void spin_locks_reset(void);
326 
342 uint next_striped_spin_lock_num(void);
343 
353 void spin_lock_claim(uint lock_num);
354 
364 void spin_lock_claim_mask(uint32_t lock_num_mask);
365 
373 void spin_lock_unclaim(uint lock_num);
374 
381 int spin_lock_claim_unused(bool required);
382 
391 bool spin_lock_is_claimed(uint lock_num);
392 
393 #define remove_volatile_cast(t, x) ({__mem_fence_acquire(); (t)(x); })
394 
395 #ifdef __cplusplus
396 }
397 #endif
398 
399 #endif
address_mapped.h
restore_interrupts
static __force_inline void restore_interrupts(uint32_t status)
Restore interrupts to a specified state.
Definition: sync.h:220
spin_lock_unclaim
void spin_lock_unclaim(uint lock_num)
Mark a spin lock as no longer used.
Definition: sync.c:50
__sev
static __force_inline void __sev(void)
Insert a SEV instruction in to the code path.
Definition: sync.h:117
__mem_fence_acquire
static __force_inline void __mem_fence_acquire(void)
Acquire a memory fence.
Definition: sync.h:175
spin_lock_unsafe_blocking
static __force_inline void spin_lock_unsafe_blocking(spin_lock_t *lock)
Acquire a spin lock without disabling interrupts (hence unsafe)
Definition: sync.h:253
save_and_disable_interrupts
static __force_inline uint32_t save_and_disable_interrupts(void)
Save and disable interrupts.
Definition: sync.h:208
__force_inline
#define __force_inline
Attribute to force inlining of a function regardless of optimization level.
Definition: platform.h:241
spin_lock_blocking
static __force_inline uint32_t spin_lock_blocking(spin_lock_t *lock)
Acquire a spin lock safely.
Definition: sync.h:279
__dsb
static __force_inline void __dsb(void)
Insert a DSB instruction in to the code path.
Definition: sync.h:157
__wfi
static __force_inline void __wfi(void)
Insert a WFI instruction in to the code path.
Definition: sync.h:136
spin_locks_reset
void spin_locks_reset(void)
Release all spin locks.
Definition: sync.c:18
spin_lock_claim
void spin_lock_claim(uint lock_num)
Mark a spin lock as used.
Definition: sync.c:39
spin_unlock
static __force_inline void spin_unlock(spin_lock_t *lock, uint32_t saved_irq)
Release a spin lock safely.
Definition: sync.h:307
spin_lock_init
spin_lock_t * spin_lock_init(uint lock_num)
Initialise a spin lock.
Definition: sync.c:24
pico.h
spin_lock_get_num
static __force_inline uint spin_lock_get_num(spin_lock_t *lock)
Get HW Spinlock number from instance.
Definition: sync.h:241
spin_unlock_unsafe
static __force_inline void spin_unlock_unsafe(spin_lock_t *lock)
Release a spin lock without re-enabling interrupts.
Definition: sync.h:266
__dmb
static __force_inline void __dmb(void)
Insert a DMB instruction in to the code path.
Definition: sync.h:146
is_spin_locked
static bool is_spin_locked(spin_lock_t *lock)
Check to see if a spinlock is currently acquired elsewhere.
Definition: sync.h:290
spin_lock_is_claimed
bool spin_lock_is_claimed(uint lock_num)
Determine if a spin lock is claimed.
Definition: sync.c:60
spin_lock_instance
static __force_inline spin_lock_t * spin_lock_instance(uint lock_num)
Get HW Spinlock instance from number.
Definition: sync.h:230
__isb
static __force_inline void __isb(void)
Insert a ISB instruction in to the code path.
Definition: sync.h:168
next_striped_spin_lock_num
uint next_striped_spin_lock_num(void)
Return a spin lock number from the striped range.
Definition: sync.c:31
__mem_fence_release
static __force_inline void __mem_fence_release(void)
Release a memory fence.
Definition: sync.h:191
__wfe
static __force_inline void __wfe(void)
Insert a WFE instruction in to the code path.
Definition: sync.h:127
spin_lock_claim_unused
int spin_lock_claim_unused(bool required)
Claim a free spin lock.
Definition: sync.c:56
spin_lock_t
volatile uint32_t spin_lock_t
A spin lock identifier.
Definition: sync.h:56
spin_lock_claim_mask
void spin_lock_claim_mask(uint32_t lock_num_mask)
Mark multiple spin locks as used.
Definition: sync.c:44