#define DBG_TAG "kernel.scheduler"
#define DBG_LVL DBG_INFO

#if RT_THREAD_PRIORITY_MAX > 32
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
static void (*rt_scheduler_switch_hook)(struct rt_thread *tid);
    rt_scheduler_hook = hook;          /* body of rt_scheduler_sethook() */

    rt_scheduler_switch_hook = hook;   /* body of rt_scheduler_switch_sethook() */
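Both hook setters are compiled in only when RT_USING_HOOK and RT_HOOK_USING_FUNC_PTR are defined. A minimal registration sketch, assuming an application-level init function; the hook names, the counter and register_sched_hooks() are illustrative and not part of this file:

#include <rtthread.h>

static volatile rt_uint32_t switch_count;

/* matches the rt_scheduler_sethook() callback type */
static void my_sched_hook(rt_thread_t from, rt_thread_t to)
{
    (void)from;
    (void)to;
    switch_count++;   /* keep hook bodies short: they run inside the scheduler path */
}

/* matches the rt_scheduler_switch_sethook() callback type */
static void my_switch_hook(struct rt_thread *tid)
{
    (void)tid;
}

static int register_sched_hooks(void)
{
    rt_scheduler_sethook(my_sched_hook);
    rt_scheduler_switch_sethook(my_switch_hook);
    return 0;
}
INIT_APP_EXPORT(register_sched_hooks);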
    /* _scheduler_get_highest_priority_thread(): find the highest ready priority */
    struct rt_thread *highest_priority_thread;

#if RT_THREAD_PRIORITY_MAX > 32
    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
#else
    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
#endif

    *highest_prio = highest_ready_priority;

    return highest_priority_thread;
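The lookup above is a two-level bitmap: rt_thread_ready_priority_group keeps one bit per group of eight priorities, rt_thread_ready_table[] keeps one bit per priority within each group, and __rt_ffs() returns the 1-based index of the lowest set bit, so subtracting 1 yields the priority number (a lower number means a higher priority). A standalone sketch of the same idea in plain C, compilable outside the kernel; every name below is local to the example and ffs_u32() stands in for __rt_ffs():

#include <stdint.h>
#include <stdio.h>

#define PRIO_MAX 256                        /* example: more than 32 priorities */

static uint32_t ready_group;                /* one bit per group of 8 priorities */
static uint8_t  ready_table[PRIO_MAX / 8];  /* one bit per priority inside a group */

/* 1-based index of the least significant set bit, 0 if value == 0
 * (same contract as the kernel's __rt_ffs()) */
static int ffs_u32(uint32_t value)
{
    int idx = 1;
    if (value == 0) return 0;
    while ((value & 1u) == 0) { value >>= 1; idx++; }
    return idx;
}

static void mark_ready(int prio)
{
    ready_table[prio >> 3] |= (uint8_t)(1u << (prio & 7));
    ready_group            |= (1u << (prio >> 3));
}

static int highest_ready_priority(void)
{
    int number = ffs_u32(ready_group) - 1;   /* lowest non-empty group of eight */
    return (number << 3) + ffs_u32(ready_table[number]) - 1;
}

int main(void)
{
    mark_ready(200);
    mark_ready(21);
    mark_ready(37);
    printf("highest ready priority: %d\n", highest_ready_priority()); /* prints 21 */
    return 0;
}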
    /* rt_system_scheduler_init(): reset all scheduler state */
    rt_scheduler_lock_nest = 0;

    LOG_D("start scheduler: max priority 0x%02x", RT_THREAD_PRIORITY_MAX);

    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
    {
        rt_list_init(&rt_thread_priority_table[offset]);
    }

    rt_thread_ready_priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
    /* clear the per-group ready table */
    rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
#endif
    /* rt_system_scheduler_start(): pick the first thread to run */
    to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);

    rt_sched_remove_thread(to_thread);
    /* rt_schedule(): pick the next thread when the scheduler is not locked */
    if (rt_scheduler_lock_nest == 0)
    {
        if (rt_thread_ready_priority_group != 0)
        {
            int need_insert_from_thread = 0;

            to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);

            if (RT_SCHED_PRIV(curr_thread).current_priority < highest_ready_priority)
            {
                to_thread = curr_thread;
            }
            else if (RT_SCHED_PRIV(curr_thread).current_priority == highest_ready_priority
                     && (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
            {
                to_thread = curr_thread;
            }
            else
            {
                need_insert_from_thread = 1;
            }

            if (to_thread != curr_thread)
            {
                from_thread = curr_thread;

                if (need_insert_from_thread)
                {
                    rt_sched_insert_thread(from_thread);
                }

                rt_sched_remove_thread(to_thread);

                LOG_D("[%d]switch to priority#%d "
                      "thread:%.*s(sp:0x%08x), "
                      "from thread:%.*s(sp: 0x%08x)",
                      rt_interrupt_nest, highest_ready_priority,
                      RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
                      RT_NAME_MAX, from_thread->parent.name, from_thread->sp);
                /* signal handling around the switch (RT_USING_SIGNALS) */
                extern void rt_thread_handle_sig(rt_bool_t clean_state);

#ifdef RT_USING_SIGNALS
                extern void rt_thread_handle_sig(rt_bool_t clean_state);
                LOG_D("switch in interrupt");

    rt_sched_remove_thread(curr_thread);
void rt_sched_thread_startup(struct rt_thread *thread)
{
#if RT_THREAD_PRIORITY_MAX > 32

    RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);

#if RT_THREAD_PRIORITY_MAX > 32
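Applications do not call rt_sched_thread_startup() directly; it is reached through the public thread API. A minimal sketch of that path, assuming a dynamically created worker thread (the thread name, entry function and the stack/priority/tick values are illustrative):

#include <rtthread.h>

static void worker_entry(void *parameter)
{
    (void)parameter;
    while (1)
    {
        /* do periodic work, then sleep for 100 ms */
        rt_thread_mdelay(100);
    }
}

static int start_worker(void)
{
    /* priority 20, 10-tick time slice */
    rt_thread_t tid = rt_thread_create("worker", worker_entry, RT_NULL,
                                       1024, 20, 10);
    if (tid == RT_NULL)
        return -RT_ERROR;

    /* rt_thread_startup() ends up putting the thread into the ready
     * queue through the rt_sched_* functions in this file */
    return rt_thread_startup(tid);
}
INIT_APP_EXPORT(start_worker);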
void rt_sched_insert_thread(struct rt_thread *thread)

    LOG_D("insert thread[%.*s], the priority: %d",
          RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);

#if RT_THREAD_PRIORITY_MAX > 32

    rt_thread_ready_priority_group |= RT_SCHED_PRIV(thread).number_mask;
void rt_sched_remove_thread(struct rt_thread *thread)

    LOG_D("remove thread[%.*s], the priority: %d",
          RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);

#if RT_THREAD_PRIORITY_MAX > 32
    if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
    {
        rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
    }
#else
    rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
#endif
#ifdef RT_DEBUGING_CRITICAL

static volatile int _critical_error_occurred = 0;

    /* rt_exit_critical_safe(): flag callers whose critical level no longer
       matches the scheduler lock nesting */
    if (!_critical_error_occurred)
    {
        if (critical_level != rt_scheduler_lock_nest)
        {
            _critical_error_occurred = 1;

            rt_kprintf("%s: un-compatible critical level\n"
                       "\tCurrent %d\n\tCaller %d\n",
                       __func__, rt_scheduler_lock_nest, critical_level);
    /* rt_enter_critical(): raise the scheduler lock nesting level */
    rt_scheduler_lock_nest ++;
    critical_level = rt_scheduler_lock_nest;

    return critical_level;
    /* rt_exit_critical(): drop one nesting level; at zero, scheduling resumes */
    rt_scheduler_lock_nest --;
    if (rt_scheduler_lock_nest <= 0)
    {
        rt_scheduler_lock_nest = 0;
    /* rt_critical_level(): current scheduler lock nesting depth */
    return rt_scheduler_lock_nest;
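rt_enter_critical() returns the nesting level it just produced, and rt_exit_critical_safe() compares that value against the current nesting when RT_DEBUGING_CRITICAL is enabled; plain rt_exit_critical() simply decrements the count and, once it reaches zero, allows rescheduling again. A usage sketch, assuming a list shared between threads (the list and queue_job() are illustrative):

#include <rtthread.h>

static rt_list_t pending_jobs = RT_LIST_OBJECT_INIT(pending_jobs);

static void queue_job(rt_list_t *job_node)
{
    /* lock the scheduler so no other thread can preempt us while the
     * list is being modified; interrupts stay enabled */
    rt_base_t level = rt_enter_critical();

    rt_list_insert_before(&pending_jobs, job_node);

    /* pairing with the returned level lets the kernel report mismatched
     * enter/exit calls when RT_DEBUGING_CRITICAL is enabled */
    rt_exit_critical_safe(level);
}

Note that scheduler locking only prevents preemption by other threads; data that is also touched from interrupt handlers still needs rt_hw_interrupt_disable()/rt_hw_interrupt_enable().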
struct rt_cpu * rt_cpu_self(void)
This function will return the current cpu object.
volatile rt_atomic_t rt_interrupt_nest
#define RT_OBJECT_HOOK_CALL(func, argv)
rt_inline void rt_list_remove(rt_list_t *n)
remove node from list.
rt_inline int rt_list_isempty(const rt_list_t *l)
tests whether a list is empty
rt_inline void rt_list_insert_before(rt_list_t *l, rt_list_t *n)
insert a node before a list
rt_inline void rt_list_init(rt_list_t *l)
initialize a list
rt_inline void rt_list_insert_after(rt_list_t *l, rt_list_t *n)
insert a node after a list
int __rt_ffs(int value)
This function finds the first bit set (beginning with the least significant bit) in value and returns the index of that bit.
rt_weak rt_err_t rt_backtrace(void)
Print backtrace of current thread to system console device
void rt_scheduler_sethook(void(*hook)(rt_thread_t from, rt_thread_t to))
void rt_exit_critical_safe(rt_base_t critical_level)
void rt_exit_critical(void)
This function will unlock the thread scheduler.
#define RT_THREAD_STAT_MASK
void rt_scheduler_switch_sethook(void(*hook)(struct rt_thread *tid))
void rt_system_scheduler_init(void)
This function will initialize the system scheduler.
rt_thread_t rt_thread_self(void)
This function will return self thread object.
#define RT_THREAD_RUNNING
#define RT_THREAD_STAT_YIELD_MASK
#define RT_SCHEDULER_STACK_CHECK(thr)
void rt_system_scheduler_start(void)
This function will start up the scheduler. It will select the thread with the highest priority level, then switch to it (see the start-up sketch at the end of this list).
#define RT_THREAD_STAT_SIGNAL_PENDING
rt_base_t rt_enter_critical(void)
This function will lock the thread scheduler.
#define RT_THREAD_SUSPEND
void rt_schedule(void)
This function will perform one round of scheduling. It will select the thread with the highest priority level in the ready queue, then switch to it.
rt_uint16_t rt_critical_level(void)
Get the scheduler lock level.
#define rt_current_thread
void rt_hw_context_switch_to(rt_ubase_t to)
void rt_hw_interrupt_enable(rt_base_t level)
void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread)
rt_base_t rt_hw_interrupt_disable(void)
void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to)
#define RTM_EXPORT(symbol)
#define RT_SCHED_CTX(thread)
#define RT_SCHED_PRIV(thread)
#define RT_THREAD_LIST_NODE(thread)
#define RT_THREAD_LIST_NODE_ENTRY(node)
rt_ubase_t rt_sched_lock_level_t
unsigned short rt_uint16_t
struct rt_list_node rt_list_t
rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX]
rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
rt_uint8_t rt_current_priority
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
struct rt_thread * current_thread
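As referenced from rt_system_scheduler_init() and rt_system_scheduler_start() above, both entry points are normally invoked exactly once during system bring-up. A simplified sketch of that sequence; everything other than the two scheduler calls and rt_hw_interrupt_disable() is a placeholder comment rather than the real boot code:

#include <rtthread.h>

void example_startup(void)
{
    rt_hw_interrupt_disable();     /* no interrupts or preemption during bring-up */

    /* ... board, heap and system timer initialization ... */

    rt_system_scheduler_init();    /* empty ready queue, scheduler lock nest = 0 */

    /* ... create the main and idle threads; they are inserted into the
     *     ready queue, but nothing runs yet ... */

    rt_system_scheduler_start();   /* switches to the highest-priority ready
                                      thread and never returns */
}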