RT-Thread RTOS 1.2.0
An open source embedded real-time operating system
载入中...
搜索中...
未找到
cpu_mp.c
浏览该文件的文档.
1/*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2018-10-30 Bernard The first version
9 * 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
10 * 2023-12-10 xqyjlj spinlock should lock sched
11 * 2024-01-25 Shell Using rt_exit_critical_safe
12 */
13#include <rthw.h>
14#include <rtthread.h>
15
16#ifdef RT_USING_SMART
17#include <lwp.h>
18#endif
19
20#ifdef RT_USING_DEBUG
21rt_base_t _cpus_critical_level;
22#endif /* RT_USING_DEBUG */
23
24static struct rt_cpu _cpus[RT_CPUS_NR];
25rt_hw_spinlock_t _cpus_lock;
26#if defined(RT_DEBUGING_SPINLOCK)
27void *_cpus_lock_owner = 0;
28void *_cpus_lock_pc = 0;
29
30#endif /* RT_DEBUGING_SPINLOCK */
31
38{
39 rt_hw_spin_lock_init(&lock->lock);
40}
42
43
51void rt_spin_lock(struct rt_spinlock *lock)
52{
54 rt_hw_spin_lock(&lock->lock);
56}
58
59
/**
 * @brief   This function will unlock the spinlock, and will unlock the thread scheduler.
 *
 * @param   lock is a pointer to the spinlock.
 */
void rt_spin_unlock(struct rt_spinlock *lock)
{
    rt_base_t critical_level;
    /* RT_SPIN_UNLOCK_DEBUG clears the debug owner info and loads the
     * scheduler-lock level to restore into critical_level. */
    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
    /* Release the hardware lock BEFORE dropping the critical section: a
     * reschedule triggered by rt_exit_critical_safe() must not run while
     * this CPU still holds the spinlock. */
    rt_hw_spin_unlock(&lock->lock);
    rt_exit_critical_safe(critical_level);
}
72
73
84{
85 rt_base_t level;
86
89 rt_hw_spin_lock(&lock->lock);
91 return level;
92}
94
95
103{
104 rt_base_t critical_level;
105
106 RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
107 rt_hw_spin_unlock(&lock->lock);
108 rt_exit_critical_safe(critical_level);
110}
112
113
118struct rt_cpu *rt_cpu_self(void)
119{
120 return &_cpus[rt_hw_cpu_id()];
121}
122
130struct rt_cpu *rt_cpu_index(int index)
131{
132 return &_cpus[index];
133}
134
141{
142 rt_base_t level;
143 struct rt_cpu* pcpu;
144
145 level = rt_hw_local_irq_disable();
146 pcpu = rt_cpu_self();
147 if (pcpu->current_thread != RT_NULL)
148 {
149 rt_ubase_t lock_nest = rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest));
150
151 rt_atomic_add(&(pcpu->current_thread->cpus_lock_nest), 1);
152 if (lock_nest == 0)
153 {
156#ifdef RT_USING_DEBUG
157 _cpus_critical_level = rt_critical_level();
158#endif /* RT_USING_DEBUG */
159
160#ifdef RT_DEBUGING_SPINLOCK
161 _cpus_lock_owner = pcpu->current_thread;
162 _cpus_lock_pc = __GET_RETURN_ADDRESS;
163#endif /* RT_DEBUGING_SPINLOCK */
164 }
165 }
166
167 return level;
168}
170
177{
178 struct rt_cpu* pcpu = rt_cpu_self();
179
180 if (pcpu->current_thread != RT_NULL)
181 {
182 rt_base_t critical_level = 0;
183 RT_ASSERT(rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)) > 0);
184 rt_atomic_sub(&(pcpu->current_thread->cpus_lock_nest), 1);
185
186 if (pcpu->current_thread->cpus_lock_nest == 0)
187 {
188#if defined(RT_DEBUGING_SPINLOCK)
189 _cpus_lock_owner = __OWNER_MAGIC;
190 _cpus_lock_pc = RT_NULL;
191#endif /* RT_DEBUGING_SPINLOCK */
192#ifdef RT_USING_DEBUG
193 critical_level = _cpus_critical_level;
194 _cpus_critical_level = 0;
195#endif /* RT_USING_DEBUG */
197 rt_exit_critical_safe(critical_level);
198 }
199 }
201}
203
212{
213#if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
214 lwp_aspace_switch(thread);
215#endif
216 rt_sched_post_ctx_switch(thread);
217}
219
220/* A safe API with debugging feature to be called in most codes */
221
222#undef rt_cpu_get_id
rt_base_t rt_cpus_lock(void)
This function will lock all cpus's scheduler and disable local irq.
void rt_cpus_unlock(rt_base_t level)
This function will restore all cpus's scheduler and restore local irq.
struct rt_cpu * rt_cpu_index(int index)
This function will return the cpu object corresponding to index.
struct rt_cpu * rt_cpu_self(void)
This function will return the current cpu object.
rt_hw_spinlock_t _cpus_lock
void rt_cpus_lock_status_restore(struct rt_thread *thread)
rt_base_t rt_cpu_get_id(void)
Get logical CPU ID
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
This function will disable the local interrupt and then lock the spinlock, will lock the thread sched...
void rt_spin_lock(struct rt_spinlock *lock)
This function will lock the spinlock, will lock the thread scheduler.
void rt_spin_lock_init(struct rt_spinlock *lock)
Initialize a static spinlock object.
void rt_spin_unlock(struct rt_spinlock *lock)
This function will unlock the spinlock, will unlock the thread scheduler.
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
This function will unlock the spinlock and then restore current cpu interrupt status,...
rt_weak rt_bool_t rt_hw_interrupt_is_disabled(void)
定义 irq.c:151
rt_inline rt_bool_t rt_scheduler_is_available(void)
#define RT_ASSERT(EX)
#define rt_sched_thread_is_binding(thread)
void rt_exit_critical_safe(rt_base_t critical_level)
rt_base_t rt_enter_critical(void)
This function will lock the thread scheduler.
rt_uint16_t rt_critical_level(void)
Get the scheduler lock level.
#define rt_atomic_sub(ptr, v)
#define rt_atomic_add(ptr, v)
#define rt_atomic_load(ptr)
#define rt_hw_spin_lock(lock)
定义 rthw.h:235
int rt_hw_cpu_id(void)
#define rt_hw_spin_unlock(lock)
定义 rthw.h:236
#define rt_hw_local_irq_disable
定义 rthw.h:152
#define rt_hw_local_irq_enable
定义 rthw.h:153
#define RTM_EXPORT(symbol)
定义 rtm.h:33
rt_int32_t rt_base_t
#define RT_SPIN_UNLOCK_DEBUG(lock, critical)
#define RT_SPIN_LOCK_DEBUG(lock)
rt_uint32_t rt_ubase_t
#define RT_NULL
struct rt_thread * current_thread
rt_ubase_t lock