42#define DBG_TAG "kernel.scheduler"
43#define DBG_LVL DBG_INFO
49#define SCHEDULER_LOCK_FLAG(percpu) ((percpu)->sched_lock_flag)
51#define SCHEDULER_ENTER_CRITICAL(curthr) \
54 if (curthr) RT_SCHED_CTX(curthr).critical_lock_nest++; \
57#define SCHEDULER_EXIT_CRITICAL(curthr) \
60 if (curthr) RT_SCHED_CTX(curthr).critical_lock_nest--; \
63#define SCHEDULER_CONTEXT_LOCK(percpu) \
66 RT_ASSERT(SCHEDULER_LOCK_FLAG(percpu) == 0); \
67 _fast_spin_lock(&_mp_scheduler_lock); \
68 SCHEDULER_LOCK_FLAG(percpu) = 1; \
71#define SCHEDULER_CONTEXT_UNLOCK(percpu) \
74 RT_ASSERT(SCHEDULER_LOCK_FLAG(percpu) == 1); \
75 SCHEDULER_LOCK_FLAG(percpu) = 0; \
76 _fast_spin_unlock(&_mp_scheduler_lock); \
79#define SCHEDULER_LOCK(level) \
82 rt_thread_t _curthr; \
83 struct rt_cpu *_percpu; \
84 level = rt_hw_local_irq_disable(); \
85 _percpu = rt_cpu_self(); \
86 _curthr = _percpu->current_thread; \
87 SCHEDULER_ENTER_CRITICAL(_curthr); \
88 SCHEDULER_CONTEXT_LOCK(_percpu); \
91#define SCHEDULER_UNLOCK(level) \
94 rt_thread_t _curthr; \
95 struct rt_cpu *_percpu; \
96 _percpu = rt_cpu_self(); \
97 _curthr = _percpu->current_thread; \
98 SCHEDULER_CONTEXT_UNLOCK(_percpu); \
99 SCHEDULER_EXIT_CRITICAL(_curthr); \
100 rt_hw_local_irq_enable(level); \
103#ifdef ARCH_USING_HW_THREAD_SELF
104#define IS_CRITICAL_SWITCH_PEND(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag)
105#define SET_CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag = 1)
106#define CLR_CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag = 0)
109#define IS_CRITICAL_SWITCH_PEND(pcpu, curthr) ((pcpu)->critical_switch_flag)
110#define SET_CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag = 1)
111#define CLR_CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag = 0)
116#if RT_THREAD_PRIORITY_MAX > 32
143#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
145static void (*rt_scheduler_switch_hook)(
struct rt_thread *tid);
161 rt_scheduler_hook = hook;
172 rt_scheduler_switch_hook = hook;
178#if RT_THREAD_PRIORITY_MAX > 32
185 number =
__rt_ffs(rt_thread_ready_priority_group) - 1;
188 highest_ready_priority = (number << 3) +
__rt_ffs(rt_thread_ready_table[number]) - 1;
192 highest_ready_priority = -1;
194 return highest_ready_priority;
202 number =
__rt_ffs(pcpu->priority_group) - 1;
205 local_highest_ready_priority = (number << 3) +
__rt_ffs(pcpu->ready_table[number]) - 1;
209 local_highest_ready_priority = -1;
211 return local_highest_ready_priority;
218 return __rt_ffs(rt_thread_ready_priority_group) - 1;
223 return __rt_ffs(pcpu->priority_group) - 1;
233 struct rt_thread *highest_priority_thread;
234 rt_ubase_t highest_ready_priority, local_highest_ready_priority;
241 if (highest_ready_priority < local_highest_ready_priority)
243 *highest_prio = highest_ready_priority;
250 *highest_prio = local_highest_ready_priority;
251 if (local_highest_ready_priority != -1)
254 pcpu->priority_table[local_highest_ready_priority].next);
258 highest_priority_thread =
RT_NULL;
264 return highest_priority_thread;
272static void _sched_insert_thread_locked(
struct rt_thread *thread)
300 if (bind_cpu == RT_CPUS_NR)
302#if RT_THREAD_PRIORITY_MAX > 32
305 rt_thread_ready_priority_group |=
RT_SCHED_PRIV(thread).number_mask;
320 cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
321 rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
327#if RT_THREAD_PRIORITY_MAX > 32
345 if (cpu_id != bind_cpu)
347 cpu_mask = 1 << bind_cpu;
348 rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
352 LOG_D(
"insert thread[%.*s], the priority: %d",
357static void _sched_remove_thread_locked(
struct rt_thread *thread)
359 LOG_D(
"%s [%.*s], the priority: %d", __func__,
370#if RT_THREAD_PRIORITY_MAX > 32
372 if (rt_thread_ready_table[
RT_SCHED_PRIV(thread).number] == 0)
374 rt_thread_ready_priority_group &=
~RT_SCHED_PRIV(thread).number_mask;
377 rt_thread_ready_priority_group &=
~RT_SCHED_PRIV(thread).number_mask;
387#if RT_THREAD_PRIORITY_MAX > 32
408 LOG_D(
"start scheduler: max priority 0x%02x",
409 RT_THREAD_PRIORITY_MAX);
413 for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
418 for (cpu = 0; cpu < RT_CPUS_NR; cpu++)
421 for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
426 pcpu->irq_switch_flag = 0;
427 pcpu->current_priority = RT_THREAD_PRIORITY_MAX - 1;
429 pcpu->priority_group = 0;
431#if RT_THREAD_PRIORITY_MAX > 32
432 rt_memset(pcpu->ready_table, 0,
sizeof(pcpu->ready_table));
441 rt_thread_ready_priority_group = 0;
443#if RT_THREAD_PRIORITY_MAX > 32
445 rt_memset(rt_thread_ready_table, 0,
sizeof(rt_thread_ready_table));
475 to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
479 _sched_remove_thread_locked(to_thread);
485 LOG_D(
"[cpu#%d] switch to priority#%d thread:%.*s(sp:0x%08x)",
562 rc = pcpu->sched_lock_flag;
574static rt_thread_t _prepare_context_switch_locked(
int cpu_id,
582 if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
585 to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
637 pcpu->current_priority = (
rt_uint8_t)highest_ready_priority;
642 _sched_remove_thread_locked(to_thread);
664#ifdef RT_USING_SIGNALS
690 extern void rt_thread_handle_sig(
rt_bool_t clean_state);
707#define SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, curthr) \
710 SCHEDULER_CONTEXT_LOCK(pcpu); \
711 _sched_thread_preprocess_signal(curthr); \
712 SCHEDULER_CONTEXT_UNLOCK(pcpu); \
714#define SCHED_THREAD_PREPROCESS_SIGNAL_LOCKED(curthr) \
715 _sched_thread_preprocess_signal(curthr)
716#define SCHED_THREAD_PROCESS_SIGNAL(curthr) _sched_thread_process_signal(curthr)
720#define SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, curthr)
721#define SCHED_THREAD_PREPROCESS_SIGNAL_LOCKED(curthr)
722#define SCHED_THREAD_PROCESS_SIGNAL(curthr)
749 pcpu->irq_switch_flag = 1;
753 return -RT_ESCHEDISR;
766 error = -RT_ESCHEDLOCKED;
776 to_thread = _prepare_context_switch_locked(cpu_id, pcpu,
current_thread);
780 LOG_D(
"[cpu#%d] UNLOCK switch to priority#%d "
781 "thread:%.*s(sp:0x%08x), "
782 "from thread:%.*s(sp: 0x%08x)",
830 pcpu->irq_switch_flag = 1;
854 pcpu->irq_switch_flag = 0;
863 to_thread = _prepare_context_switch_locked(cpu_id, pcpu,
current_thread);
867 LOG_D(
"[cpu#%d] switch to priority#%d "
868 "thread:%.*s(sp:0x%08x), "
869 "from thread:%.*s(sp: 0x%08x)",
914 SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, current_thread);
917 if (pcpu->irq_switch_flag == 0)
926 if (
RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
935 pcpu->irq_switch_flag = 0;
940 to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
943 LOG_D(
"[cpu#%d] IRQ switch to priority#%d "
944 "thread:%.*s(sp:0x%08x), "
945 "from thread:%.*s(sp: 0x%08x)",
948 RT_NAME_MAX, current_thread->
parent.
name, current_thread->
sp);
978void rt_sched_insert_thread(
struct rt_thread *thread)
984 _sched_insert_thread_locked(thread);
994void rt_sched_remove_thread(
struct rt_thread *thread)
1000 _sched_remove_thread_locked(thread);
1012 RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
1018#if RT_THREAD_PRIORITY_MAX > 32
1036void rt_sched_thread_startup(
struct rt_thread *thread)
1038#if RT_THREAD_PRIORITY_MAX > 32
1055void rt_sched_post_ctx_switch(
struct rt_thread *thread)
1074#ifdef RT_DEBUGING_CRITICAL
1076static volatile int _critical_error_occurred = 0;
1087 _critical_error_occurred = 1;
1089 rt_kprintf(
"%s: un-compatible critical level\n" \
1090 "\tCurrent %d\n\tCaller %d\n",
1112#ifdef ARCH_USING_HW_THREAD_SELF
1113#define FREE_THREAD_SELF(lvl)
1116#define FREE_THREAD_SELF(lvl) \
1119 rt_hw_local_irq_enable(lvl); \
1132#ifndef ARCH_USING_HW_THREAD_SELF
1149 FREE_THREAD_SELF(level);
1158 FREE_THREAD_SELF(level);
1160 return critical_level;
1172#ifndef ARCH_USING_HW_THREAD_SELF
1189 FREE_THREAD_SELF(level);
1203 FREE_THREAD_SELF(level);
1213 FREE_THREAD_SELF(level);
1236 critical_lvl =
RT_SCHED_CTX(current_thread).critical_lock_nest;
1244 return critical_lvl;
1255 if (cpu >= RT_CPUS_NR)
1268 rt_sched_remove_thread(thread);
1272 rt_sched_insert_thread(thread);
1291 if (cpu != RT_CPUS_NR)
1296 if (cpu != current_cpu)
1299 rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << cpu);
1312 rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U <<
RT_SCHED_CTX(thread).oncpu);
struct rt_cpu * rt_cpu_index(int index)
This function will return the cpu object corresponding to index.
struct rt_cpu * rt_cpu_self(void)
This function will return the current cpu object.
rt_hw_spinlock_t _cpus_lock
int stat(const char *file, struct stat *buf)
void rt_spin_lock_init(struct rt_spinlock *lock)
Initialize a static spinlock object.
rt_weak rt_bool_t rt_hw_interrupt_is_disabled(void)
#define RT_OBJECT_HOOK_CALL(func, argv)
rt_uint8_t rt_object_get_type(rt_object_t object)
This function will return the type of object without RT_Object_Class_Static flag.
rt_inline void rt_list_remove(rt_list_t *n)
remove node from list.
rt_inline int rt_list_isempty(const rt_list_t *l)
tests whether a list is empty
rt_inline void rt_list_insert_before(rt_list_t *l, rt_list_t *n)
insert a node before a list
rt_inline void rt_list_init(rt_list_t *l)
initialize a list
rt_inline void rt_list_insert_after(rt_list_t *l, rt_list_t *n)
insert a node after a list
int __rt_ffs(int value)
This function finds the first bit set (beginning with the least significant bit) in value and return ...
rt_weak rt_err_t rt_backtrace(void)
Print backtrace of current thread to system console device
void rt_scheduler_sethook(void(*hook)(rt_thread_t from, rt_thread_t to))
void rt_scheduler_ipi_handler(int vector, void *param)
This function will handle IPI interrupt and do a scheduling in system.
void rt_exit_critical_safe(rt_base_t critical_level)
#define RT_THREAD_STAT_SIGNAL_MASK
void rt_exit_critical(void)
This function will unlock the thread scheduler.
#define RT_THREAD_STAT_MASK
void rt_scheduler_switch_sethook(void(*hook)(struct rt_thread *tid))
void rt_system_scheduler_init(void)
This function will initialize the system scheduler.
rt_thread_t rt_thread_self(void)
This function will return self thread object.
#define RT_THREAD_SUSPEND_UNINTERRUPTIBLE
#define RT_THREAD_RUNNING
#define RT_THREAD_STAT_YIELD_MASK
#define RT_SCHEDULER_STACK_CHECK(thr)
void rt_system_scheduler_start(void)
This function will startup the scheduler. It will select one thread with the highest priority level,...
#define RT_THREAD_STAT_SIGNAL_PENDING
rt_err_t rt_thread_resume(rt_thread_t thread)
This function will resume a thread and put it to system ready queue.
void rt_scheduler_do_irq_switch(void *context)
This function checks whether a scheduling is needed after an IRQ context switching....
rt_base_t rt_enter_critical(void)
This function will lock the thread scheduler.
struct rt_thread * rt_thread_t
#define RT_THREAD_SUSPEND
void rt_schedule(void)
This function will perform one scheduling. It will select one thread with the highest priority level ...
rt_uint16_t rt_critical_level(void)
Get the scheduler lock level.
#define rt_atomic_load(ptr)
#define rt_hw_spin_lock(lock)
void rt_hw_context_switch_to(rt_ubase_t to)
#define rt_hw_spin_unlock(lock)
void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread)
#define rt_hw_local_irq_disable
void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to)
#define rt_hw_local_irq_enable
#define RTM_EXPORT(symbol)
#define RT_SCHED_CTX(thread)
#define RT_SCHED_DEBUG_IS_LOCKED
#define RT_SCHED_PRIV(thread)
#define RT_THREAD_LIST_NODE(thread)
rt_bool_t rt_sched_is_locked(void)
#define RT_SCHED_DEBUG_IS_UNLOCKED
#define RT_THREAD_LIST_NODE_ENTRY(node)
rt_ubase_t rt_sched_lock_level_t
#define RT_SPIN_UNLOCK_DEBUG(lock, critical)
#define RT_SPIN_LOCK_DEBUG(lock)
unsigned short rt_uint16_t
struct rt_list_node rt_list_t
rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread)
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
#define SCHEDULER_CONTEXT_LOCK(percpu)
rt_inline void _fast_spin_lock(struct rt_spinlock *lock)
rt_inline void _fast_spin_unlock(struct rt_spinlock *lock)
#define SCHEDULER_UNLOCK(level)
rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX]
rt_inline rt_base_t _get_local_highest_ready_prio(struct rt_cpu *pcpu)
#define SET_CRITICAL_SWITCH_FLAG(pcpu, curthr)
#define IS_CRITICAL_SWITCH_PEND(pcpu, curthr)
#define SCHEDULER_CONTEXT_UNLOCK(percpu)
#define SCHEDULER_LOCK(level)
#define SCHEDULER_ENTER_CRITICAL(curthr)
#define CLR_CRITICAL_SWITCH_FLAG(pcpu, curthr)
#define SCHEDULER_EXIT_CRITICAL(curthr)
rt_inline rt_base_t _get_global_highest_ready_prio(void)
rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
struct rt_thread * current_thread