RT-Thread RTOS 1.2.0
An open source embedded real-time operating system
scheduler_mp.c
Go to the documentation of this file.
1/*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2006-03-17 Bernard the first version
9 * 2006-04-28 Bernard fix the scheduler algorithm
10 * 2006-04-30 Bernard add SCHEDULER_DEBUG
11 * 2006-05-27 Bernard fix the scheduler algorithm for same priority
12 * thread schedule
13 * 2006-06-04 Bernard rewrite the scheduler algorithm
14 * 2006-08-03 Bernard add hook support
15 * 2006-09-05 Bernard add 32 priority level support
16 * 2006-09-24 Bernard add rt_system_scheduler_start function
17 * 2009-09-16 Bernard fix _rt_scheduler_stack_check
18 * 2010-04-11 yi.qiu add module feature
19 * 2010-07-13 Bernard fix the maximal number of rt_scheduler_lock_nest
20 * issue found by kuronca
21 * 2010-12-13 Bernard add defunct list initialization even if heap is not used.
22 * 2011-05-10 Bernard clean scheduler debug log.
23 * 2013-12-21 Grissiom add rt_critical_level
24 * 2018-11-22 Jesven remove the current task from ready queue
25 * add per cpu ready queue
26 * add _scheduler_get_highest_priority_thread to find highest priority task
27 * rt_schedule_insert_thread won't insert current task to ready queue
28 * in smp version, rt_hw_context_switch_interrupt may switch to
29 * new task directly
30 * 2022-01-07 Gabriel Moving __on_rt_xxxxx_hook to scheduler.c
31 * 2023-03-27 rose_man Split into scheduler_up.c and scheduler_mp.c
32 * 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
33 * 2023-12-10 xqyjlj use rt_hw_spinlock
34 * 2024-01-05 Shell Fixup of data racing in rt_critical_level
35 * 2024-01-18 Shell support rt_sched_thread of scheduling status for better mt protection
36 * 2024-01-18 Shell support rt_hw_thread_self to improve overall performance
37 */
38
39#include <rtthread.h>
40#include <rthw.h>
41
42#define DBG_TAG "kernel.scheduler"
43#define DBG_LVL DBG_INFO
44#include <rtdbg.h>
45
46rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
47static struct rt_spinlock _mp_scheduler_lock;
48
49#define SCHEDULER_LOCK_FLAG(percpu) ((percpu)->sched_lock_flag)
50
51#define SCHEDULER_ENTER_CRITICAL(curthr) \
52 do \
53 { \
54 if (curthr) RT_SCHED_CTX(curthr).critical_lock_nest++; \
55 } while (0)
56
57#define SCHEDULER_EXIT_CRITICAL(curthr) \
58 do \
59 { \
60 if (curthr) RT_SCHED_CTX(curthr).critical_lock_nest--; \
61 } while (0)
62
63#define SCHEDULER_CONTEXT_LOCK(percpu) \
64 do \
65 { \
66 RT_ASSERT(SCHEDULER_LOCK_FLAG(percpu) == 0); \
67 _fast_spin_lock(&_mp_scheduler_lock); \
68 SCHEDULER_LOCK_FLAG(percpu) = 1; \
69 } while (0)
70
71#define SCHEDULER_CONTEXT_UNLOCK(percpu) \
72 do \
73 { \
74 RT_ASSERT(SCHEDULER_LOCK_FLAG(percpu) == 1); \
75 SCHEDULER_LOCK_FLAG(percpu) = 0; \
76 _fast_spin_unlock(&_mp_scheduler_lock); \
77 } while (0)
78
79#define SCHEDULER_LOCK(level) \
80 do \
81 { \
82 rt_thread_t _curthr; \
83 struct rt_cpu *_percpu; \
84 level = rt_hw_local_irq_disable(); \
85 _percpu = rt_cpu_self(); \
86 _curthr = _percpu->current_thread; \
87 SCHEDULER_ENTER_CRITICAL(_curthr); \
88 SCHEDULER_CONTEXT_LOCK(_percpu); \
89 } while (0)
90
91#define SCHEDULER_UNLOCK(level) \
92 do \
93 { \
94 rt_thread_t _curthr; \
95 struct rt_cpu *_percpu; \
96 _percpu = rt_cpu_self(); \
97 _curthr = _percpu->current_thread; \
98 SCHEDULER_CONTEXT_UNLOCK(_percpu); \
99 SCHEDULER_EXIT_CRITICAL(_curthr); \
100 rt_hw_local_irq_enable(level); \
101 } while (0)
102
103#ifdef ARCH_USING_HW_THREAD_SELF
104#define IS_CRITICAL_SWITCH_PEND(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag)
105#define SET_CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag = 1)
106#define CLR_CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag = 0)
107
108#else /* !ARCH_USING_HW_THREAD_SELF */
109#define IS_CRITICAL_SWITCH_PEND(pcpu, curthr) ((pcpu)->critical_switch_flag)
110#define SET_CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag = 1)
111#define CLR_CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag = 0)
112
113#endif /* ARCH_USING_HW_THREAD_SELF */
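/*
 * Locking layers used by the macros above:
 *   1. rt_hw_local_irq_disable() masks interrupts on the local core;
 *   2. SCHEDULER_ENTER_CRITICAL()/SCHEDULER_EXIT_CRITICAL() adjust the
 *      current thread's critical_lock_nest so rescheduling is deferred;
 *   3. SCHEDULER_CONTEXT_LOCK()/SCHEDULER_CONTEXT_UNLOCK() take and release
 *      the global _mp_scheduler_lock spinlock and mirror its state in the
 *      per-cpu sched_lock_flag.
 * SCHEDULER_LOCK()/SCHEDULER_UNLOCK() apply and undo all three layers, while
 * the *_CRITICAL_SWITCH_FLAG() helpers record a reschedule request that was
 * deferred because the scheduler was locked.
 */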
114
115static rt_uint32_t rt_thread_ready_priority_group;
116#if RT_THREAD_PRIORITY_MAX > 32
117/* Maximum priority level, 256 */
118static rt_uint8_t rt_thread_ready_table[32];
119#endif /* RT_THREAD_PRIORITY_MAX > 32 */
120
125rt_inline void _fast_spin_lock(struct rt_spinlock *lock)
126{
127 rt_hw_spin_lock(&lock->lock);
128
129 RT_SPIN_LOCK_DEBUG(lock);
130}
131
132rt_inline void _fast_spin_unlock(struct rt_spinlock *lock)
133{
134 rt_base_t critical_level;
135 RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
136
137 /* for the scenario of sched, we don't check critical level */
138 RT_UNUSED(critical_level);
139
140 rt_hw_spin_unlock(&lock->lock);
141}
142
143#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
144static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);
145static void (*rt_scheduler_switch_hook)(struct rt_thread *tid);
146
150
152
159void rt_scheduler_sethook(void (*hook)(struct rt_thread *from, struct rt_thread *to))
160{
161 rt_scheduler_hook = hook;
162}
163
170void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid))
171{
172 rt_scheduler_switch_hook = hook;
173}
174
176#endif /* RT_USING_HOOK */
177
178#if RT_THREAD_PRIORITY_MAX > 32
179
180rt_inline rt_base_t _get_global_highest_ready_prio(void)
181{
182 rt_ubase_t number;
183 rt_ubase_t highest_ready_priority;
184
185 number = __rt_ffs(rt_thread_ready_priority_group) - 1;
186 if (number != -1)
187 {
188 highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
189 }
190 else
191 {
192 highest_ready_priority = -1;
193 }
194 return highest_ready_priority;
195}
196
197rt_inline rt_base_t _get_local_highest_ready_prio(struct rt_cpu* pcpu)
198{
199 rt_ubase_t number;
200 rt_ubase_t local_highest_ready_priority;
201
202 number = __rt_ffs(pcpu->priority_group) - 1;
203 if (number != -1)
204 {
205 local_highest_ready_priority = (number << 3) + __rt_ffs(pcpu->ready_table[number]) - 1;
206 }
207 else
208 {
209 local_highest_ready_priority = -1;
210 }
211 return local_highest_ready_priority;
212}
213
214#else /* if RT_THREAD_PRIORITY_MAX <= 32 */
215
216rt_inline rt_base_t _get_global_highest_ready_prio(void)
217{
218 return __rt_ffs(rt_thread_ready_priority_group) - 1;
219}
220
221rt_inline rt_base_t _get_local_highest_ready_prio(struct rt_cpu* pcpu)
222{
223 return __rt_ffs(pcpu->priority_group) - 1;
224}
225
226#endif /* RT_THREAD_PRIORITY_MAX > 32 */
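/*
 * Worked example of the two-level lookup above (RT_THREAD_PRIORITY_MAX > 32):
 * if priority 10 is the highest ready priority, bit 1 of the priority group
 * is set, so the outer __rt_ffs() returns 2 and number = 1; the ready table
 * entry for group 1 has bit 2 set (10 & 0x07), so the inner __rt_ffs()
 * returns 3 and the result is (1 << 3) + 3 - 1 = 10. With 32 or fewer
 * priorities a single __rt_ffs() over the priority group is enough.
 */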
227
228/*
229 * get the highest priority thread in ready queue
230 */
231static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *highest_prio)
232{
233 struct rt_thread *highest_priority_thread;
234 rt_ubase_t highest_ready_priority, local_highest_ready_priority;
235 struct rt_cpu* pcpu = rt_cpu_self();
236
237 highest_ready_priority = _get_global_highest_ready_prio();
238 local_highest_ready_priority = _get_local_highest_ready_prio(pcpu);
239
240 /* get highest ready priority thread */
241 if (highest_ready_priority < local_highest_ready_priority)
242 {
243 *highest_prio = highest_ready_priority;
244
245 highest_priority_thread = RT_THREAD_LIST_NODE_ENTRY(
246 rt_thread_priority_table[highest_ready_priority].next);
247 }
248 else
249 {
250 *highest_prio = local_highest_ready_priority;
251 if (local_highest_ready_priority != -1)
252 {
253 highest_priority_thread = RT_THREAD_LIST_NODE_ENTRY(
254 pcpu->priority_table[local_highest_ready_priority].next);
255 }
256 else
257 {
258 highest_priority_thread = RT_NULL;
259 }
260 }
261
262 RT_ASSERT(!highest_priority_thread ||
263 rt_object_get_type(&highest_priority_thread->parent) == RT_Object_Class_Thread);
264 return highest_priority_thread;
265}
266
272static void _sched_insert_thread_locked(struct rt_thread *thread)
273{
274 int cpu_id;
275 int bind_cpu;
276 rt_uint32_t cpu_mask;
277
278 if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
279 {
280 /* already in ready queue */
281 return ;
282 }
283 else if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
284 {
289 RT_SCHED_CTX(thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
290 return ;
291 }
292
293 /* READY thread, insert to ready queue */
294 RT_SCHED_CTX(thread).stat = RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
295
296 cpu_id = rt_hw_cpu_id();
297 bind_cpu = RT_SCHED_CTX(thread).bind_cpu;
298
299 /* insert thread to ready list */
300 if (bind_cpu == RT_CPUS_NR)
301 {
302#if RT_THREAD_PRIORITY_MAX > 32
303 rt_thread_ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
304#endif /* RT_THREAD_PRIORITY_MAX > 32 */
305 rt_thread_ready_priority_group |= RT_SCHED_PRIV(thread).number_mask;
306
307 /* no time slice left (YIELD): insert the thread at the tail of the ready list */
308 if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
309 {
310 rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
311 &RT_THREAD_LIST_NODE(thread));
312 }
313 /* time slices remain: insert the thread at the head of the ready list so it runs first next time */
314 else
315 {
316 rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
317 &RT_THREAD_LIST_NODE(thread));
318 }
319
320 cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
321 rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
322 }
323 else
324 {
325 struct rt_cpu *pcpu = rt_cpu_index(bind_cpu);
326
327#if RT_THREAD_PRIORITY_MAX > 32
328 pcpu->ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
329#endif /* RT_THREAD_PRIORITY_MAX > 32 */
330 pcpu->priority_group |= RT_SCHED_PRIV(thread).number_mask;
331
332 /* no time slice left (YIELD): insert the thread at the tail of the ready list */
333 if((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
334 {
335 rt_list_insert_before(&(rt_cpu_index(bind_cpu)->priority_table[RT_SCHED_PRIV(thread).current_priority]),
336 &RT_THREAD_LIST_NODE(thread));
337 }
338 /* time slices remain: insert the thread at the head of the ready list so it runs first next time */
339 else
340 {
341 rt_list_insert_after(&(rt_cpu_index(bind_cpu)->priority_table[RT_SCHED_PRIV(thread).current_priority]),
342 &RT_THREAD_LIST_NODE(thread));
343 }
344
345 if (cpu_id != bind_cpu)
346 {
347 cpu_mask = 1 << bind_cpu;
348 rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
349 }
350 }
351
352 LOG_D("insert thread[%.*s], the priority: %d",
353 RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
354}
355
356/* remove thread from ready queue */
357static void _sched_remove_thread_locked(struct rt_thread *thread)
358{
359 LOG_D("%s [%.*s], the priority: %d", __func__,
360 RT_NAME_MAX, thread->parent.name,
361 RT_SCHED_PRIV(thread).current_priority);
362
363 /* remove thread from ready list */
364 rt_list_remove(&RT_THREAD_LIST_NODE(thread));
365 
366 if (RT_SCHED_CTX(thread).bind_cpu == RT_CPUS_NR)
367 {
368 if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority])))
369 {
370#if RT_THREAD_PRIORITY_MAX > 32
371 rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
372 if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
373 {
374 rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
375 }
376#else
377 rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
378#endif /* RT_THREAD_PRIORITY_MAX > 32 */
379 }
380 }
381 else
382 {
383 struct rt_cpu *pcpu = rt_cpu_index(RT_SCHED_CTX(thread).bind_cpu);
384
385 if (rt_list_isempty(&(pcpu->priority_table[RT_SCHED_PRIV(thread).current_priority])))
386 {
387#if RT_THREAD_PRIORITY_MAX > 32
388 pcpu->ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
389 if (pcpu->ready_table[RT_SCHED_PRIV(thread).number] == 0)
390 {
391 pcpu->priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
392 }
393#else
394 pcpu->priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
395#endif /* RT_THREAD_PRIORITY_MAX > 32 */
396 }
397 }
398}
399
403void rt_system_scheduler_init(void)
404{
405 int cpu;
406 rt_base_t offset;
407
408 LOG_D("start scheduler: max priority 0x%02x",
409 RT_THREAD_PRIORITY_MAX);
410
411 rt_spin_lock_init(&_mp_scheduler_lock);
412
413 for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
414 {
415 rt_list_init(&rt_thread_priority_table[offset]);
416 }
417
418 for (cpu = 0; cpu < RT_CPUS_NR; cpu++)
419 {
420 struct rt_cpu *pcpu = rt_cpu_index(cpu);
421 for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
422 {
423 rt_list_init(&pcpu->priority_table[offset]);
424 }
425
426 pcpu->irq_switch_flag = 0;
427 pcpu->current_priority = RT_THREAD_PRIORITY_MAX - 1;
428 pcpu->current_thread = RT_NULL;
429 pcpu->priority_group = 0;
430
431#if RT_THREAD_PRIORITY_MAX > 32
432 rt_memset(pcpu->ready_table, 0, sizeof(pcpu->ready_table));
433#endif /* RT_THREAD_PRIORITY_MAX > 32 */
434
435#ifdef RT_USING_SMART
436 rt_spin_lock_init(&(pcpu->spinlock));
437#endif
438 }
439
440 /* initialize ready priority group */
441 rt_thread_ready_priority_group = 0;
442
443#if RT_THREAD_PRIORITY_MAX > 32
444 /* initialize ready table */
445 rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
446#endif /* RT_THREAD_PRIORITY_MAX > 32 */
447}
448
453void rt_system_scheduler_start(void)
454{
455 struct rt_thread *to_thread;
456 rt_ubase_t highest_ready_priority;
457
463 rt_hw_spin_unlock(&_cpus_lock);
464 
465 /* ISR will corrupt the coherency of running frame */
466 rt_hw_local_irq_disable();
467 
472 _fast_spin_lock(&_mp_scheduler_lock);
473
474 /* get the thread scheduling to */
475 to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
476 RT_ASSERT(to_thread);
477
478 /* to_thread is picked to running on current core, so remove it from ready queue */
479 _sched_remove_thread_locked(to_thread);
480
481 /* dedicate the current core to `to_thread` */
482 RT_SCHED_CTX(to_thread).oncpu = rt_hw_cpu_id();
483 RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING;
484
485 LOG_D("[cpu#%d] switch to priority#%d thread:%.*s(sp:0x%08x)",
486 rt_hw_cpu_id(), RT_SCHED_PRIV(to_thread).current_priority,
487 RT_NAME_MAX, to_thread->parent.name, to_thread->sp);
488
489 _fast_spin_unlock(&_mp_scheduler_lock);
490
491 /* switch to new thread */
492 rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp, to_thread);
493
494 /* never come back */
495}
496
501
503
513void rt_scheduler_ipi_handler(int vector, void *param)
514{
515 rt_schedule();
516}
517
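/*
 * Lock the system scheduler: interrupts are masked on the local core, the
 * caller's critical nesting level is raised and the global scheduler context
 * lock is taken. The saved interrupt level is handed back through plvl so
 * that rt_sched_unlock()/rt_sched_unlock_n_resched() can restore it later.
 */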
525rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
526{
527 rt_base_t level;
528 if (!plvl)
529 return -RT_EINVAL;
530
531 SCHEDULER_LOCK(level);
532 *plvl = level;
533
534 return RT_EOK;
535}
536
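/*
 * Unlock the system scheduler: release the scheduler context lock, drop the
 * caller's critical nesting level and restore the interrupt level previously
 * returned by rt_sched_lock().
 */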
545rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
546{
547 SCHEDULER_UNLOCK(level);
548
549 return RT_EOK;
550}
551
552rt_bool_t rt_sched_is_locked(void)
553{
554 rt_bool_t rc;
555 rt_base_t level;
556 struct rt_cpu *pcpu;
557
558 level = rt_hw_local_irq_disable();
559 pcpu = rt_cpu_self();
560
561 /* get lock stat which is a boolean value */
562 rc = pcpu->sched_lock_flag;
563
564 rt_hw_local_irq_enable(level);
565 return rc;
566}
567
574static rt_thread_t _prepare_context_switch_locked(int cpu_id,
575 struct rt_cpu *pcpu,
576 struct rt_thread *current_thread)
577{
578 rt_thread_t to_thread = RT_NULL;
579 rt_ubase_t highest_ready_priority;
580
581 /* quickly check if any other ready threads queuing */
582 if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
583 {
584 /* pick the highest ready thread */
585 to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
586
587 /* detach current thread from percpu scheduling context */
588 RT_SCHED_CTX(current_thread).oncpu = RT_CPU_DETACHED;
589
590 /* check if current thread should be put to ready queue, or scheduling again */
591 if ((RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
592 {
593 /* check if current thread can be running on current core again */
594 if (RT_SCHED_CTX(current_thread).bind_cpu == RT_CPUS_NR
595 || RT_SCHED_CTX(current_thread).bind_cpu == cpu_id)
596 {
597 /* if current_thread is the highest runnable thread */
598 if (RT_SCHED_PRIV(current_thread).current_priority < highest_ready_priority)
599 {
600 to_thread = current_thread;
601 }
602 /* or no higher-priority thread existed and it has remaining ticks */
603 else if (RT_SCHED_PRIV(current_thread).current_priority == highest_ready_priority &&
604 (RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
605 {
606 to_thread = current_thread;
607 }
608 /* otherwise give out the core */
609 else
610 {
611 _sched_insert_thread_locked(current_thread);
612 }
613 }
614 else
615 {
616 /* put current_thread to ready queue of another core */
617 _sched_insert_thread_locked(current_thread);
618 }
619
620 /* consume the yield flags after scheduling */
621 RT_SCHED_CTX(current_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
622 }
623
632 RT_SCHED_CTX(to_thread).oncpu = cpu_id;
633
634 /* check if context switch is required */
635 if (to_thread != current_thread)
636 {
637 pcpu->current_priority = (rt_uint8_t)highest_ready_priority;
638
639 RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));
640
641 /* remove to_thread from ready queue and update its status to RUNNING */
642 _sched_remove_thread_locked(to_thread);
643 RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);
644
645 RT_SCHEDULER_STACK_CHECK(to_thread);
646
647 RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));
648 }
649 else
650 {
651 /* current thread is still the best runnable thread */
652 to_thread = RT_NULL;
653 }
654 }
655 else
656 {
657 /* no ready threads */
658 to_thread = RT_NULL;
659 }
660
661 return to_thread;
662}
663
664#ifdef RT_USING_SIGNALS
665static void _sched_thread_preprocess_signal(struct rt_thread *current_thread)
666{
667 /* should process signal? */
668 if (rt_sched_thread_is_suspended(current_thread))
669 {
670 /* if current_thread signal is in pending */
671 if ((RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL_PENDING)
672 {
673#ifdef RT_USING_SMART
674 rt_thread_wakeup(current_thread);
675#else
676 rt_thread_resume(current_thread);
677#endif
678 }
679 }
680}
681
682static void _sched_thread_process_signal(struct rt_thread *current_thread)
683{
684 rt_base_t level;
685 SCHEDULER_LOCK(level);
686
687 /* check stat of thread for signal */
688 if (RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
689 {
690 extern void rt_thread_handle_sig(rt_bool_t clean_state);
691
692 RT_SCHED_CTX(current_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
693 
694 SCHEDULER_UNLOCK(level);
695
696 /* check signal status */
697 rt_thread_handle_sig(RT_TRUE);
698 }
699 else
700 {
701 SCHEDULER_UNLOCK(level);
702 }
703
704 /* lock is released above */
705}
706
707#define SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, curthr) \
708 do \
709 { \
710 SCHEDULER_CONTEXT_LOCK(pcpu); \
711 _sched_thread_preprocess_signal(curthr); \
712 SCHEDULER_CONTEXT_UNLOCK(pcpu); \
713 } while (0)
714#define SCHED_THREAD_PREPROCESS_SIGNAL_LOCKED(curthr) \
715 _sched_thread_preprocess_signal(curthr)
716#define SCHED_THREAD_PROCESS_SIGNAL(curthr) _sched_thread_process_signal(curthr)
717
718#else /* ! RT_USING_SIGNALS */
719
720#define SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, curthr)
721#define SCHED_THREAD_PREPROCESS_SIGNAL_LOCKED(curthr)
722#define SCHED_THREAD_PROCESS_SIGNAL(curthr)
723#endif /* RT_USING_SIGNALS */
724
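/*
 * Release the scheduler lock taken by rt_sched_lock() and try to reschedule
 * immediately. Returns -RT_EBUSY if the scheduler is not running yet,
 * -RT_ESCHEDISR when called from interrupt context, and -RT_ESCHEDLOCKED
 * when the caller still holds an outer critical section.
 */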
725rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
726{
727 struct rt_thread *to_thread;
728 struct rt_thread *current_thread;
729 struct rt_cpu *pcpu;
730 int cpu_id;
731 rt_err_t error = RT_EOK;
732
733 cpu_id = rt_hw_cpu_id();
734 pcpu = rt_cpu_index(cpu_id);
735 current_thread = pcpu->current_thread;
736 
737 if (!current_thread)
738 {
739 /* scheduler is unavailable yet */
740 SCHEDULER_CONTEXT_UNLOCK(pcpu);
741 SCHEDULER_EXIT_CRITICAL(current_thread);
742 rt_hw_local_irq_enable(level);
743 return -RT_EBUSY;
744 }
745
746 /* if called from interrupt context, defer the switch */
747 if (rt_atomic_load(&(pcpu->irq_nest)))
748 {
749 pcpu->irq_switch_flag = 1;
750 SCHEDULER_CONTEXT_UNLOCK(pcpu);
751 SCHEDULER_EXIT_CRITICAL(current_thread);
752 rt_hw_local_irq_enable(level);
753 return -RT_ESCHEDISR;
754 }
755
756 /* prepare current_thread for processing if signals existed */
757 SCHED_THREAD_PREPROCESS_SIGNAL_LOCKED(current_thread);
758
759 /* whether caller had locked the local scheduler already */
760 if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
761 {
762 /* leaving critical region of global context since we can't schedule */
763 SCHEDULER_CONTEXT_UNLOCK(pcpu);
764 
765 SET_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
766 error = -RT_ESCHEDLOCKED;
767 
768 SCHEDULER_EXIT_CRITICAL(current_thread);
769 }
770 else
771 {
772 /* flush critical switch flag since a scheduling is done */
773 CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
774 
775 /* pick the highest runnable thread, and pass the control to it */
776 to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
777 if (to_thread)
778 {
779 /* switch to new thread */
780 LOG_D("[cpu#%d] UNLOCK switch to priority#%d "
781 "thread:%.*s(sp:0x%08x), "
782 "from thread:%.*s(sp: 0x%08x)",
783 cpu_id, RT_SCHED_PRIV(to_thread).current_priority,
784 RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
785 RT_NAME_MAX, current_thread->parent.name, current_thread->sp);
786
787 rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
788 (rt_ubase_t)&to_thread->sp, to_thread);
789 }
790 else
791 {
792 SCHEDULER_CONTEXT_UNLOCK(pcpu);
793 SCHEDULER_EXIT_CRITICAL(current_thread);
794 }
795 }
796
797 /* leaving critical region of percpu scheduling context */
798 rt_hw_local_irq_enable(level);
799 
800 /* process signals on thread if any existed */
801 SCHED_THREAD_PROCESS_SIGNAL(current_thread);
802
803 return error;
804}
805
811void rt_schedule(void)
812{
813 rt_base_t level;
814 struct rt_thread *to_thread;
815 struct rt_thread *current_thread;
816 struct rt_cpu *pcpu;
817 int cpu_id;
818
819 /* enter critical region of percpu scheduling context */
820 level = rt_hw_local_irq_disable();
821
822 /* get percpu scheduling context */
823 cpu_id = rt_hw_cpu_id();
824 pcpu = rt_cpu_index(cpu_id);
825 current_thread = pcpu->current_thread;
826 
827 /* if called from interrupt context, defer the switch */
828 if (rt_atomic_load(&(pcpu->irq_nest)))
829 {
830 pcpu->irq_switch_flag = 1;
831 rt_hw_local_irq_enable(level);
832 return ; /* -RT_ESCHEDISR */
833 }
834
835 /* forbid any recursive entries of schedule() */
836 SCHEDULER_ENTER_CRITICAL(current_thread);
837 
838 /* prepare current_thread for processing if signals existed */
839 SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, current_thread);
840
841 /* whether caller had locked the local scheduler already */
842 if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
843 {
844 SET_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
845 
846 SCHEDULER_EXIT_CRITICAL(current_thread);
847 
848 /* -RT_ESCHEDLOCKED */
849 }
850 else
851 {
852 /* flush critical switch flag since a scheduling is done */
853 CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
854 pcpu->irq_switch_flag = 0;
855
860 SCHEDULER_CONTEXT_LOCK(pcpu);
861 
862 /* pick the highest runnable thread, and pass the control to it */
863 to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
864
865 if (to_thread)
866 {
867 LOG_D("[cpu#%d] switch to priority#%d "
868 "thread:%.*s(sp:0x%08x), "
869 "from thread:%.*s(sp: 0x%08x)",
870 cpu_id, RT_SCHED_PRIV(to_thread).current_priority,
871 RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
872 RT_NAME_MAX, current_thread->parent.name, current_thread->sp);
873
874 rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
875 (rt_ubase_t)&to_thread->sp, to_thread);
876 }
877 else
878 {
879 /* current thread continues to take the core */
880 SCHEDULER_CONTEXT_UNLOCK(pcpu);
881 SCHEDULER_EXIT_CRITICAL(current_thread);
882 }
883 }
884
885 /* leaving critical region of percpu scheduling context */
886 rt_hw_local_irq_enable(level);
887 
888 /* process signals on thread if any existed */
889 SCHED_THREAD_PROCESS_SIGNAL(current_thread);
890}
891
897void rt_scheduler_do_irq_switch(void *context)
898{
899 int cpu_id;
900 rt_base_t level;
901 struct rt_cpu *pcpu;
902 struct rt_thread *to_thread;
903 struct rt_thread *current_thread;
904
905 level = rt_hw_local_irq_disable();
906
907 cpu_id = rt_hw_cpu_id();
908 pcpu = rt_cpu_index(cpu_id);
909 current_thread = pcpu->current_thread;
910
911 /* forbid any recursive entries of schedule() */
912 SCHEDULER_ENTER_CRITICAL(current_thread);
913
914 SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, current_thread);
915
916 /* any pending scheduling existed? */
917 if (pcpu->irq_switch_flag == 0)
918 {
919 /* if no, just continue execution of current_thread */
920 SCHEDULER_EXIT_CRITICAL(current_thread);
921 rt_hw_local_irq_enable(level);
922 return;
923 }
924
925 /* whether caller had locked the local scheduler already */
926 if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
927 {
928 SET_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
929 SCHEDULER_EXIT_CRITICAL(current_thread);
930 }
931 else if (rt_atomic_load(&(pcpu->irq_nest)) == 0)
932 {
933 /* flush critical & irq switch flag since a scheduling is done */
934 CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
935 pcpu->irq_switch_flag = 0;
936
937 SCHEDULER_CONTEXT_LOCK(pcpu);
938 
939 /* pick the highest runnable thread, and pass the control to it */
940 to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
941 if (to_thread)
942 {
943 LOG_D("[cpu#%d] IRQ switch to priority#%d "
944 "thread:%.*s(sp:0x%08x), "
945 "from thread:%.*s(sp: 0x%08x)",
946 cpu_id, RT_SCHED_PRIV(to_thread).current_priority,
947 RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
948 RT_NAME_MAX, current_thread->parent.name, current_thread->sp);
949
950 rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp,
951 (rt_ubase_t)&to_thread->sp, to_thread);
952 }
953 else
954 {
955 /* current thread continues to take the core */
956 SCHEDULER_CONTEXT_UNLOCK(pcpu);
957 SCHEDULER_EXIT_CRITICAL(current_thread);
958 }
959 }
960 else
961 {
962 SCHEDULER_EXIT_CRITICAL(current_thread);
963 }
964
965 /* leaving critical region of percpu scheduling context */
966 rt_hw_local_irq_enable(level);
967}
968
978void rt_sched_insert_thread(struct rt_thread *thread)
979{
980 RT_ASSERT(thread != RT_NULL);
981 RT_SCHED_DEBUG_IS_LOCKED;
982 
983 /* set READY and insert thread to ready queue */
984 _sched_insert_thread_locked(thread);
985}
986
994void rt_sched_remove_thread(struct rt_thread *thread)
995{
996 RT_ASSERT(thread != RT_NULL);
997 RT_SCHED_DEBUG_IS_LOCKED;
998 
999 /* remove thread from scheduler ready list */
1000 _sched_remove_thread_locked(thread);
1001
1002 RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND_UNINTERRUPTIBLE;
1003}
1004
1005/* thread status initialization and setting up on startup */
1006
1007void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
1008{
1009 rt_list_init(&RT_THREAD_LIST_NODE(thread));
1010 
1011 /* priority init */
1012 RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
1013 RT_SCHED_PRIV(thread).init_priority = priority;
1014 RT_SCHED_PRIV(thread).current_priority = priority;
1015
1016 /* don't add to scheduler queue as init thread */
1017 RT_SCHED_PRIV(thread).number_mask = 0;
1018#if RT_THREAD_PRIORITY_MAX > 32
1019 RT_SCHED_PRIV(thread).number = 0;
1020 RT_SCHED_PRIV(thread).high_mask = 0;
1021#endif /* RT_THREAD_PRIORITY_MAX > 32 */
1022
1023 /* tick init */
1024 RT_SCHED_PRIV(thread).init_tick = tick;
1025 RT_SCHED_PRIV(thread).remaining_tick = tick;
1026
1027#ifdef RT_USING_SMP
1028
1029 /* lock init */
1030 RT_SCHED_CTX(thread).critical_lock_nest = 0;
1031#endif /* RT_USING_SMP */
1032
1033}
1034
1035/* Normally, there isn't anyone racing with us so this operation is lockless */
1036void rt_sched_thread_startup(struct rt_thread *thread)
1037{
1038#if RT_THREAD_PRIORITY_MAX > 32
1039 RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3; /* 5bit */
1040 RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).number;
1041 RT_SCHED_PRIV(thread).high_mask = 1L << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
1042#else
1043 RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).current_priority;
1044#endif /* RT_THREAD_PRIORITY_MAX > 32 */
1045
1046 /* change thread stat, so we can resume it */
1047 RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND;
1048}
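/*
 * Example of the bitmap setup above: a thread with current_priority 21 gets
 * number = 21 >> 3 = 2, number_mask = 1 << 2 and high_mask = 1 << (21 & 0x07)
 * = 1 << 5, which is exactly what the ready-queue lookup helpers decode. In
 * the <= 32 priority configuration only number_mask (1 << 21) is used.
 */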
1049
1055void rt_sched_post_ctx_switch(struct rt_thread *thread)
1056{
1057 struct rt_cpu* pcpu = rt_cpu_self();
1058 rt_thread_t from_thread = pcpu->current_thread;
1059
1060 RT_ASSERT(rt_hw_interrupt_is_disabled());
1061 
1062 if (from_thread)
1063 {
1064 RT_ASSERT(RT_SCHED_CTX(from_thread).critical_lock_nest == 1);
1065
1066 /* release the scheduler lock since we are done with critical region */
1067 RT_SCHED_CTX(from_thread).critical_lock_nest = 0;
1068 SCHEDULER_CONTEXT_UNLOCK(pcpu);
1069 }
1070 /* safe to access since irq is masked out */
1071 pcpu->current_thread = thread;
1072}
1073
1074#ifdef RT_DEBUGING_CRITICAL
1075
1076static volatile int _critical_error_occurred = 0;
1077
1078void rt_exit_critical_safe(rt_base_t critical_level)
1079{
1080 struct rt_cpu *pcpu = rt_cpu_self();
1081 rt_thread_t current_thread = pcpu->current_thread;
1082 if (current_thread && !_critical_error_occurred)
1083 {
1084 if (critical_level != RT_SCHED_CTX(current_thread).critical_lock_nest)
1085 {
1086 int dummy = 1;
1087 _critical_error_occurred = 1;
1088
1089 rt_kprintf("%s: un-compatible critical level\n" \
1090 "\tCurrent %d\n\tCaller %d\n",
1091 __func__, RT_SCHED_CTX(current_thread).critical_lock_nest,
1092 critical_level);
1093 rt_backtrace();
1094
1095 while (dummy) ;
1096 }
1097 }
1098 rt_exit_critical();
1099}
1100
1101#else /* !RT_DEBUGING_CRITICAL */
1102
1103void rt_exit_critical_safe(rt_base_t critical_level)
1104{
1105 RT_UNUSED(critical_level);
1106 return rt_exit_critical();
1107}
1108
1109#endif /* RT_DEBUGING_CRITICAL */
1111
1112#ifdef ARCH_USING_HW_THREAD_SELF
1113#define FREE_THREAD_SELF(lvl)
1114
1115#else /* !ARCH_USING_HW_THREAD_SELF */
1116#define FREE_THREAD_SELF(lvl) \
1117 do \
1118 { \
1119 rt_hw_local_irq_enable(lvl); \
1120 } while (0)
1121
1122#endif /* ARCH_USING_HW_THREAD_SELF */
1123
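/*
 * Lock the thread scheduler for the calling thread. When
 * ARCH_USING_HW_THREAD_SELF is enabled, rt_hw_thread_self() reads the current
 * thread from a dedicated register, so the nesting count can be updated
 * without masking interrupts; otherwise interrupts are briefly disabled to
 * access the per-cpu current_thread safely.
 */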
1127rt_base_t rt_enter_critical(void)
1128{
1129 rt_base_t critical_level;
1130 struct rt_thread *current_thread;
1131
1132#ifndef ARCH_USING_HW_THREAD_SELF
1133 rt_base_t level;
1134 struct rt_cpu *pcpu;
1135
1136 /* disable interrupt */
1137 level = rt_hw_local_irq_disable();
1138
1139 pcpu = rt_cpu_self();
1140 current_thread = pcpu->current_thread;
1141 
1142#else /* !ARCH_USING_HW_THREAD_SELF */
1143 current_thread = rt_hw_thread_self();
1144
1145#endif /* ARCH_USING_HW_THREAD_SELF */
1146
1147 if (!current_thread)
1148 {
1149 FREE_THREAD_SELF(level);
1150 /* scheduler unavailable */
1151 return -RT_EINVAL;
1152 }
1153
1154 /* critical for local cpu */
1155 RT_SCHED_CTX(current_thread).critical_lock_nest++;
1156 critical_level = RT_SCHED_CTX(current_thread).critical_lock_nest;
1157
1158 FREE_THREAD_SELF(level);
1159
1160 return critical_level;
1161}
1163
1167void rt_exit_critical(void)
1168{
1169 struct rt_thread *current_thread;
1170 rt_bool_t need_resched;
1171
1172#ifndef ARCH_USING_HW_THREAD_SELF
1173 rt_base_t level;
1174 struct rt_cpu *pcpu;
1175
1176 /* disable interrupt */
1177 level = rt_hw_local_irq_disable();
1178
1179 pcpu = rt_cpu_self();
1180 current_thread = pcpu->current_thread;
1181 
1182#else /* !ARCH_USING_HW_THREAD_SELF */
1183 current_thread = rt_hw_thread_self();
1184
1185#endif /* ARCH_USING_HW_THREAD_SELF */
1186
1187 if (!current_thread)
1188 {
1189 FREE_THREAD_SELF(level);
1190 return;
1191 }
1192
1193 /* the necessary memory barrier is done on irq_(dis|en)able */
1194 RT_SCHED_CTX(current_thread).critical_lock_nest--;
1195
1196 /* may need a rescheduling */
1197 if (RT_SCHED_CTX(current_thread).critical_lock_nest == 0)
1198 {
1199 /* is there any scheduling request unfinished? */
1200 need_resched = IS_CRITICAL_SWITCH_PEND(pcpu, current_thread);
1201 CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
1202 
1203 FREE_THREAD_SELF(level);
1204
1205 if (need_resched)
1206 rt_schedule();
1207 }
1208 else
1209 {
1210 /* each exit_critical strictly corresponds to an enter_critical */
1211 RT_ASSERT(RT_SCHED_CTX(current_thread).critical_lock_nest > 0);
1212
1213 FREE_THREAD_SELF(level);
1214 }
1215}
1217
1223rt_uint16_t rt_critical_level(void)
1224{
1225 rt_base_t level;
1226 rt_uint16_t critical_lvl;
1227 struct rt_thread *current_thread;
1228
1229 level = rt_hw_local_irq_disable();
1230
1231 current_thread = rt_cpu_self()->current_thread;
1232
1233 if (current_thread)
1234 {
1235 /* the necessary memory barrier is done on irq_(dis|en)able */
1236 critical_lvl = RT_SCHED_CTX(current_thread).critical_lock_nest;
1237 }
1238 else
1239 {
1240 critical_lvl = 0;
1241 }
1242
1243 rt_hw_local_irq_enable(level);
1244 return critical_lvl;
1245}
1247
1248rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
1249{
1250 rt_sched_lock_level_t slvl;
1251 rt_uint8_t thread_stat;
1252
1254
1255 if (cpu >= RT_CPUS_NR)
1256 {
1257 cpu = RT_CPUS_NR;
1258 }
1259
1260 rt_sched_lock(&slvl);
1261
1262 thread_stat = rt_sched_thread_get_stat(thread);
1263
1264 if (thread_stat == RT_THREAD_READY)
1265 {
1266 /* unbind */
1267 /* remove from old ready queue */
1268 rt_sched_remove_thread(thread);
1269 /* change thread bind cpu */
1270 RT_SCHED_CTX(thread).bind_cpu = cpu;
1271 /* add to new ready queue */
1272 rt_sched_insert_thread(thread);
1273
1274 if (rt_thread_self() != RT_NULL)
1275 {
1276 rt_sched_unlock_n_resched(slvl);
1277 }
1278 else
1279 {
1280 rt_sched_unlock(slvl);
1281 }
1282 }
1283 else
1284 {
1285 RT_SCHED_CTX(thread).bind_cpu = cpu;
1286 if (thread_stat == RT_THREAD_RUNNING)
1287 {
1288 /* thread is running on a cpu */
1289 int current_cpu = rt_hw_cpu_id();
1290
1291 if (cpu != RT_CPUS_NR)
1292 {
1293 if (RT_SCHED_CTX(thread).oncpu == current_cpu)
1294 {
1295 /* current thread on current cpu */
1296 if (cpu != current_cpu)
1297 {
1298 /* bind to other cpu */
1299 rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << cpu);
1300 /* self cpu need reschedule */
1301 rt_sched_unlock_n_resched(slvl);
1302 }
1303 else
1304 {
1305 /* else do nothing */
1306 rt_sched_unlock(slvl);
1307 }
1308 }
1309 else
1310 {
1311 /* not running on the current cpu, but the destination cpu may be itself */
1312 rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << RT_SCHED_CTX(thread).oncpu);
1313 rt_sched_unlock(slvl);
1314 }
1315 }
1316 else
1317 {
1318 /* else do nothing */
1319 rt_sched_unlock(slvl);
1320 }
1321 }
1322 else
1323 {
1324 rt_sched_unlock(slvl);
1325 }
1326 }
1327
1328 return RT_EOK;
1329}
1330
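A minimal usage sketch of the critical-section API implemented above (this
example is not part of scheduler_mp.c; the function name
demo_touch_shared_state and the variable shared_state are made up for
illustration):

#include <rtthread.h>

static rt_uint32_t shared_state;

/* update shared state without being rescheduled in between */
static void demo_touch_shared_state(void)
{
    /* rt_enter_critical() returns the nesting level after locking */
    rt_base_t critical_level = rt_enter_critical();

    shared_state++;

    /* rt_exit_critical_safe() checks the nesting level before unlocking */
    rt_exit_critical_safe(critical_level);
}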
struct rt_cpu * rt_cpu_index(int index)
This function will return the cpu object corresponding to index.
struct rt_cpu * rt_cpu_self(void)
This function will return the current cpu object.
rt_hw_spinlock_t _cpus_lock
int stat(const char *file, struct stat *buf)
void rt_spin_lock_init(struct rt_spinlock *lock)
Initialize a static spinlock object.
rt_weak rt_bool_t rt_hw_interrupt_is_disabled(void)
Definition irq.c:151
#define RT_OBJECT_HOOK_CALL(func, argv)
rt_uint8_t rt_object_get_type(rt_object_t object)
This function will return the type of object without RT_Object_Class_Static flag.
@ RT_Object_Class_Thread
rt_inline void rt_list_remove(rt_list_t *n)
remove node from list.
rt_inline int rt_list_isempty(const rt_list_t *l)
tests whether a list is empty
#define rt_kprintf(...)
rt_inline void rt_list_insert_before(rt_list_t *l, rt_list_t *n)
insert a node before a list
#define RT_ASSERT(EX)
rt_inline void rt_list_init(rt_list_t *l)
initialize a list
rt_inline void rt_list_insert_after(rt_list_t *l, rt_list_t *n)
insert a node after a list
int __rt_ffs(int value)
This function finds the first bit set (beginning with the least significant bit) in value and return ...
rt_weak rt_err_t rt_backtrace(void)
Print backtrace of current thread to system console device
void rt_scheduler_sethook(void(*hook)(rt_thread_t from, rt_thread_t to))
void rt_scheduler_ipi_handler(int vector, void *param)
This function will handle IPI interrupt and do a scheduling in system.
void rt_exit_critical_safe(rt_base_t critical_level)
#define RT_THREAD_STAT_SIGNAL_MASK
void rt_exit_critical(void)
This function will unlock the thread scheduler.
#define RT_THREAD_READY
#define RT_THREAD_STAT_MASK
void rt_scheduler_switch_sethook(void(*hook)(struct rt_thread *tid))
void rt_system_scheduler_init(void)
This function will initialize the system scheduler.
rt_thread_t rt_thread_self(void)
This function will return self thread object.
#define RT_THREAD_SUSPEND_UNINTERRUPTIBLE
#define RT_THREAD_RUNNING
#define RT_THREAD_STAT_YIELD_MASK
#define RT_SCHEDULER_STACK_CHECK(thr)
void rt_system_scheduler_start(void)
This function will startup the scheduler. It will select one thread with the highest priority level,...
#define RT_THREAD_STAT_SIGNAL_PENDING
rt_err_t rt_thread_resume(rt_thread_t thread)
This function will resume a thread and put it to system ready queue.
void rt_scheduler_do_irq_switch(void *context)
This function checks whether a scheduling is needed after an IRQ context switching....
rt_base_t rt_enter_critical(void)
This function will lock the thread scheduler.
struct rt_thread * rt_thread_t
#define RT_THREAD_SUSPEND
void rt_schedule(void)
This function will perform one scheduling. It will select one thread with the highest priority level ...
rt_uint16_t rt_critical_level(void)
Get the scheduler lock level.
#define rt_atomic_load(ptr)
#define LOG_D(...)
#define RT_UNUSED(x)
#define rt_hw_spin_lock(lock)
Definition rthw.h:235
void rt_hw_context_switch_to(rt_ubase_t to)
int rt_hw_cpu_id(void)
#define rt_hw_spin_unlock(lock)
Definition rthw.h:236
void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread)
#define rt_hw_local_irq_disable
Definition rthw.h:152
void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to)
#define rt_hw_local_irq_enable
Definition rthw.h:153
#define RTM_EXPORT(symbol)
Definition rtm.h:33
#define RT_SCHED_CTX(thread)
#define RT_SCHED_DEBUG_IS_LOCKED
#define RT_SCHED_PRIV(thread)
#define RT_THREAD_LIST_NODE(thread)
rt_bool_t rt_sched_is_locked(void)
#define RT_SCHED_DEBUG_IS_UNLOCKED
#define RT_THREAD_LIST_NODE_ENTRY(node)
rt_ubase_t rt_sched_lock_level_t
rt_int32_t rt_base_t
#define RT_SPIN_UNLOCK_DEBUG(lock, critical)
int rt_bool_t
rt_base_t rt_err_t
#define RT_SPIN_LOCK_DEBUG(lock)
unsigned char rt_uint8_t
unsigned short rt_uint16_t
#define RT_TRUE
struct rt_list_node rt_list_t
unsigned int rt_uint32_t
rt_uint32_t rt_ubase_t
#define RT_NULL
rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread)
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
#define SCHEDULER_CONTEXT_LOCK(percpu)
rt_inline void _fast_spin_lock(struct rt_spinlock *lock)
rt_inline void _fast_spin_unlock(struct rt_spinlock *lock)
#define SCHEDULER_UNLOCK(level)
rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX]
rt_inline rt_base_t _get_local_highest_ready_prio(struct rt_cpu *pcpu)
#define SET_CRITICAL_SWITCH_FLAG(pcpu, curthr)
#define IS_CRITICAL_SWITCH_PEND(pcpu, curthr)
#define SCHEDULER_CONTEXT_UNLOCK(percpu)
#define SCHEDULER_LOCK(level)
#define SCHEDULER_ENTER_CRITICAL(curthr)
#define CLR_CRITICAL_SWITCH_FLAG(pcpu, curthr)
#define SCHEDULER_EXIT_CRITICAL(curthr)
rt_inline rt_base_t _get_global_highest_ready_prio(void)
rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
struct rt_thread * current_thread
const char * name
rt_ubase_t lock
struct rt_object parent
void * sp