RT-Thread RTOS 1.2.0
An open source embedded real-time operating system
Loading...
Searching...
Not found
thread.c
Browse the documentation of this file.
1/*
2 * Copyright (c) 2006-2022, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2006-03-28 Bernard first version
9 * 2006-04-29 Bernard implement thread timer
10 * 2006-04-30 Bernard added THREAD_DEBUG
11 * 2006-05-27 Bernard fixed the rt_thread_yield bug
12 * 2006-06-03 Bernard fixed the thread timer init bug
13 * 2006-08-10 Bernard fixed the timer bug in thread_sleep
14 * 2006-09-03 Bernard changed rt_timer_delete to rt_timer_detach
15 * 2006-09-03 Bernard implement rt_thread_detach
16 * 2008-02-16 Bernard fixed the rt_thread_timeout bug
17 * 2010-03-21 Bernard change the errno of rt_thread_delay/sleep to
18 * RT_EOK.
19 * 2010-11-10 Bernard add cleanup callback function in thread exit.
20 * 2011-09-01 Bernard fixed rt_thread_exit issue when the current
21 * thread preempted, which reported by Jiaxing Lee.
22 * 2011-09-08 Bernard fixed the scheduling issue in rt_thread_startup.
23 * 2012-12-29 Bernard fixed compiling warning.
24 * 2016-08-09 ArdaFu add thread suspend and resume hook.
25 * 2017-04-10 armink fixed the rt_thread_delete and rt_thread_detach
26 * bug when thread has not startup.
27 * 2018-11-22 Jesven yield is same to rt_schedule
28 * add support for tasks bound to cpu
29 * 2021-02-24 Meco Man rearrange rt_thread_control() - schedule the thread when close it
30 * 2021-11-15 THEWON Remove duplicate work between idle and _thread_exit
31 * 2021-12-27 Meco Man remove .init_priority
32 * 2022-01-07 Gabriel Moving __on_rt_xxxxx_hook to thread.c
33 * 2022-01-24 THEWON let _thread_sleep return thread->error when using signal
34 * 2022-10-15 Bernard add nested mutex feature
35 * 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
36 * 2023-12-10 xqyjlj fix thread_exit/detach/delete
37 * fix rt_thread_delay
38 */
39
40#include <rthw.h>
41#include <rtthread.h>
42#include <stddef.h>
43
44#define DBG_TAG "kernel.thread"
45#define DBG_LVL DBG_INFO
46#include <rtdbg.h>
47
#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
/* called right after a thread is suspended; set via rt_thread_suspend_sethook() */
static void (*rt_thread_suspend_hook)(rt_thread_t thread);
/* called right after a thread is resumed; set via rt_thread_resume_sethook() */
static void (*rt_thread_resume_hook) (rt_thread_t thread);

/**
 * @brief   This function sets a hook function when the system suspends a thread.
 *
 * @note    The hook function must be simple and never be blocked or suspended.
 *
 * @param   hook is the specified hook function.
 */
void rt_thread_suspend_sethook(void (*hook)(rt_thread_t thread))
{
    rt_thread_suspend_hook = hook;
}

/**
 * @brief   This function sets a hook function when the system resumes a thread.
 *
 * @note    The hook function must be simple and never be blocked or suspended.
 *
 * @param   hook is the specified hook function.
 */
void rt_thread_resume_sethook(void (*hook)(rt_thread_t thread))
{
    rt_thread_resume_hook = hook;
}

/* hook list invoked after a thread object has been initialized */
RT_OBJECT_HOOKLIST_DEFINE(rt_thread_inited);
#endif /* defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR) */
78
79#ifdef RT_USING_MUTEX
80static void _thread_detach_from_mutex(rt_thread_t thread)
81{
82 rt_list_t *node;
83 rt_list_t *tmp_list;
84 struct rt_mutex *mutex;
85 rt_base_t level;
86
87 level = rt_spin_lock_irqsave(&thread->spinlock);
88
89 /* check if thread is waiting on a mutex */
90 if ((thread->pending_object) &&
92 {
93 /* remove it from its waiting list */
94 struct rt_mutex *mutex = (struct rt_mutex*)thread->pending_object;
95 rt_mutex_drop_thread(mutex, thread);
96 thread->pending_object = RT_NULL;
97 }
98
99 /* free taken mutex after detaching from waiting, so we don't lost mutex just got */
100 rt_list_for_each_safe(node, tmp_list, &(thread->taken_object_list))
101 {
102 mutex = rt_list_entry(node, struct rt_mutex, taken_list);
103 LOG_D("Thread [%s] exits while holding mutex [%s].\n", thread->parent.name, mutex->parent.parent.name);
104 /* recursively take */
105 mutex->hold = 1;
106 rt_mutex_release(mutex);
107 }
108
109 rt_spin_unlock_irqrestore(&thread->spinlock, level);
110}
111
112#else
113
114static void _thread_detach_from_mutex(rt_thread_t thread) {}
115#endif
116
117static void _thread_exit(void)
118{
119 struct rt_thread *thread;
120 rt_base_t critical_level;
121
122 /* get current thread */
123 thread = rt_thread_self();
124
125 critical_level = rt_enter_critical();
126
127 rt_thread_close(thread);
128
129 _thread_detach_from_mutex(thread);
130
131 /* insert to defunct thread list */
133
134 rt_exit_critical_safe(critical_level);
135
136 /* switch to next task */
137 rt_schedule();
138}
139
146static void _thread_timeout(void *parameter)
147{
148 struct rt_thread *thread;
150
151 thread = (struct rt_thread *)parameter;
152
153 /* parameter check */
154 RT_ASSERT(thread != RT_NULL);
156
157 rt_sched_lock(&slvl);
158
164
165 /* set error number */
166 thread->error = -RT_ETIMEOUT;
167
168 /* remove from suspend list */
170 /* insert to schedule ready list */
171 rt_sched_insert_thread(thread);
172 /* do schedule and release the scheduler lock */
174}
175
176static rt_err_t _thread_init(struct rt_thread *thread,
177 const char *name,
178 void (*entry)(void *parameter),
179 void *parameter,
180 void *stack_start,
182 rt_uint8_t priority,
183 rt_uint32_t tick)
184{
185 RT_UNUSED(name);
186
187 rt_sched_thread_init_ctx(thread, tick, priority);
188
189#ifdef RT_USING_MEM_PROTECTION
190 thread->mem_regions = RT_NULL;
191#endif
192
193#ifdef RT_USING_SMART
194 thread->wakeup_handle.func = RT_NULL;
195#endif
196
197 thread->entry = (void *)entry;
198 thread->parameter = parameter;
199
200 /* stack init */
201 thread->stack_addr = stack_start;
202 thread->stack_size = stack_size;
203
204 /* init thread stack */
205 rt_memset(thread->stack_addr, '#', thread->stack_size);
206#ifdef RT_USING_HW_STACK_GUARD
207 rt_hw_stack_guard_init(thread);
208#endif
209#ifdef ARCH_CPU_STACK_GROWS_UPWARD
210 thread->sp = (void *)rt_hw_stack_init(thread->entry, thread->parameter,
211 (void *)((char *)thread->stack_addr),
212 (void *)_thread_exit);
213#else
214 thread->sp = (void *)rt_hw_stack_init(thread->entry, thread->parameter,
215 (rt_uint8_t *)((char *)thread->stack_addr + thread->stack_size - sizeof(rt_ubase_t)),
216 (void *)_thread_exit);
217#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
218
219#ifdef RT_USING_MUTEX
221 thread->pending_object = RT_NULL;
222#endif
223
224#ifdef RT_USING_EVENT
225 thread->event_set = 0;
226 thread->event_info = 0;
227#endif /* RT_USING_EVENT */
228
229 /* error and flags */
230 thread->error = RT_EOK;
231
232 /* lock init */
233#ifdef RT_USING_SMP
234 rt_atomic_store(&thread->cpus_lock_nest, 0);
235#endif
236
237 /* initialize cleanup function and user data */
238 thread->cleanup = 0;
239 thread->user_data = 0;
240
241 /* initialize thread timer */
242 rt_timer_init(&(thread->thread_timer),
243 thread->parent.name,
244 _thread_timeout,
245 thread,
246 0,
248
249 /* initialize signal */
250#ifdef RT_USING_SIGNALS
251 thread->sig_mask = 0x00;
252 thread->sig_pending = 0x00;
253
254#ifndef RT_USING_SMP
255 thread->sig_ret = RT_NULL;
256#endif /* RT_USING_SMP */
257 thread->sig_vectors = RT_NULL;
258 thread->si_list = RT_NULL;
259#endif /* RT_USING_SIGNALS */
260
261#ifdef RT_USING_SMART
262 thread->tid_ref_count = 0;
263 thread->lwp = RT_NULL;
264 thread->susp_recycler = RT_NULL;
265 thread->robust_list = RT_NULL;
266 rt_list_init(&(thread->sibling));
267
268 /* lwp thread-signal init */
269 rt_memset(&thread->signal.sigset_mask, 0, sizeof(lwp_sigset_t));
270 rt_memset(&thread->signal.sig_queue.sigset_pending, 0, sizeof(lwp_sigset_t));
271 rt_list_init(&thread->signal.sig_queue.siginfo_list);
272
273 rt_memset(&thread->user_ctx, 0, sizeof thread->user_ctx);
274
275 /* initialize user_time and system_time */
276 thread->user_time = 0;
277 thread->system_time = 0;
278#endif
279
280#ifdef RT_USING_CPU_USAGE
281 thread->duration_tick = 0;
282#endif /* RT_USING_CPU_USAGE */
283
284#ifdef RT_USING_PTHREADS
285 thread->pthread_data = RT_NULL;
286#endif /* RT_USING_PTHREADS */
287
288#ifdef RT_USING_MODULE
289 thread->parent.module_id = 0;
290#endif /* RT_USING_MODULE */
291
292 rt_spin_lock_init(&thread->spinlock);
293
294 RT_OBJECT_HOOKLIST_CALL(rt_thread_inited, (thread));
295
296 return RT_EOK;
297}
298
302
304
329 const char *name,
330 void (*entry)(void *parameter),
331 void *parameter,
332 void *stack_start,
334 rt_uint8_t priority,
335 rt_uint32_t tick)
336{
337 /* parameter check */
338 RT_ASSERT(thread != RT_NULL);
339 RT_ASSERT(stack_start != RT_NULL);
340 RT_ASSERT(tick != 0);
341
342 /* clean memory data of thread */
343 rt_memset(thread, 0x0, sizeof(struct rt_thread));
344
345 /* initialize thread object */
347
348 return _thread_init(thread,
349 name,
350 entry,
351 parameter,
352 stack_start,
354 priority,
355 tick);
356}
358
365{
366#ifndef RT_USING_SMP
367 return rt_cpu_self()->current_thread;
368
369#elif defined (ARCH_USING_HW_THREAD_SELF)
370 return rt_hw_thread_self();
371
372#else /* !ARCH_USING_HW_THREAD_SELF */
373 rt_thread_t self;
374 rt_base_t lock;
375
377 self = rt_cpu_self()->current_thread;
379
380 return self;
381#endif /* ARCH_USING_HW_THREAD_SELF */
382}
384
394{
395 /* parameter check */
396 RT_ASSERT(thread != RT_NULL);
399
400 LOG_D("startup a thread:%s with priority:%d",
401 thread->parent.name, thread->current_priority);
402
403 /* calculate priority attribute and reset thread stat to suspend */
404 rt_sched_thread_startup(thread);
405
406 /* resume and do a schedule if scheduler is available */
407 rt_thread_resume(thread);
408
409 return RT_EOK;
410}
412
425{
427 rt_uint8_t thread_status;
428
429 /* forbid scheduling on current core if closing current thread */
431
432 /* before checking status of scheduler */
433 rt_sched_lock(&slvl);
434
435 /* check if thread is already closed */
436 thread_status = rt_sched_thread_get_stat(thread);
437 if (thread_status != RT_THREAD_CLOSE)
438 {
439 if (thread_status != RT_THREAD_INIT)
440 {
441 /* remove from schedule */
442 rt_sched_remove_thread(thread);
443 }
444
445 /* release thread timer */
446 rt_timer_detach(&(thread->thread_timer));
447
448 /* change stat */
449 rt_sched_thread_close(thread);
450 }
451
452 /* scheduler works are done */
453 rt_sched_unlock(slvl);
454
455 return RT_EOK;
456}
458
459static rt_err_t _thread_detach(rt_thread_t thread);
460
471{
472 /* parameter check */
473 RT_ASSERT(thread != RT_NULL);
476
477 return _thread_detach(thread);
478}
480
481static rt_err_t _thread_detach(rt_thread_t thread)
482{
484 rt_base_t critical_level;
485
490 critical_level = rt_enter_critical();
491
492 error = rt_thread_close(thread);
493
494 _thread_detach_from_mutex(thread);
495
496 /* insert to defunct thread list */
498
499 rt_exit_critical_safe(critical_level);
500 return error;
501}
502
503#ifdef RT_USING_HEAP
524 void (*entry)(void *parameter),
525 void *parameter,
527 rt_uint8_t priority,
528 rt_uint32_t tick)
529{
530 /* parameter check */
531 RT_ASSERT(tick != 0);
532
533 struct rt_thread *thread;
534 void *stack_start;
535
537 name);
538 if (thread == RT_NULL)
539 return RT_NULL;
540
541 stack_start = (void *)RT_KERNEL_MALLOC(stack_size);
542 if (stack_start == RT_NULL)
543 {
544 /* allocate stack failure */
546
547 return RT_NULL;
548 }
549
550 _thread_init(thread,
551 name,
552 entry,
553 parameter,
554 stack_start,
556 priority,
557 tick);
558
559 return thread;
560}
562
573{
574 /* parameter check */
575 RT_ASSERT(thread != RT_NULL);
578
579 return _thread_detach(thread);
580}
582#endif /* RT_USING_HEAP */
583
593{
595 rt_sched_lock(&slvl);
596
598
600
601 return RT_EOK;
602}
604
614static rt_err_t _thread_sleep(rt_tick_t tick)
615{
616 struct rt_thread *thread;
617 rt_base_t critical_level;
618 int err;
619
620 if (tick == 0)
621 {
622 return -RT_EINVAL;
623 }
624
625 /* set to current thread */
626 thread = rt_thread_self();
627 RT_ASSERT(thread != RT_NULL);
629
630 /* current context checking */
632
633 /* reset thread error */
634 thread->error = RT_EOK;
635
636 /* lock scheduler since current thread may be suspended */
637 critical_level = rt_enter_critical();
638
639 /* suspend thread */
641
642 /* reset the timeout of thread timer and start it */
643 if (err == RT_EOK)
644 {
646 rt_timer_start(&(thread->thread_timer));
647
648 thread->error = -RT_EINTR;
649
650 /* notify a pending rescheduling */
651 rt_schedule();
652
653 /* exit critical and do a rescheduling */
654 rt_exit_critical_safe(critical_level);
655
656 /* clear error number of this thread to RT_EOK */
657 if (thread->error == -RT_ETIMEOUT)
658 thread->error = RT_EOK;
659 }
660 else
661 {
662 rt_exit_critical_safe(critical_level);
663 }
664
665 return err;
666}
667
677{
678 return _thread_sleep(tick);
679}
681
693{
694 struct rt_thread *thread;
695 rt_tick_t cur_tick;
696 rt_base_t critical_level;
697
698 RT_ASSERT(tick != RT_NULL);
699
700 /* set to current thread */
701 thread = rt_thread_self();
702 RT_ASSERT(thread != RT_NULL);
704
705 /* reset thread error */
706 thread->error = RT_EOK;
707
708 /* disable interrupt */
709 critical_level = rt_enter_critical();
710
711 cur_tick = rt_tick_get();
712 if (cur_tick - *tick < inc_tick)
713 {
714 rt_tick_t left_tick;
715
716 *tick += inc_tick;
717 left_tick = *tick - cur_tick;
718
719 /* suspend thread */
721
722 /* reset the timeout of thread timer and start it */
724 rt_timer_start(&(thread->thread_timer));
725
726 rt_exit_critical_safe(critical_level);
727
728 rt_schedule();
729
730 /* clear error number of this thread to RT_EOK */
731 if (thread->error == -RT_ETIMEOUT)
732 {
733 thread->error = RT_EOK;
734 }
735 }
736 else
737 {
738 *tick = cur_tick;
739 rt_exit_critical_safe(critical_level);
740 }
741
742 return thread->error;
743}
745
755{
756 rt_tick_t tick;
757
758 tick = rt_tick_from_millisecond(ms);
759
760 return _thread_sleep(tick);
761}
763
764#ifdef RT_USING_SMP
765#endif
766
787rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
788{
789 /* parameter check */
790 RT_ASSERT(thread != RT_NULL);
792
793 switch (cmd)
794 {
796 {
799 rt_sched_lock(&slvl);
801 rt_sched_unlock(slvl);
802 return error;
803 }
804
806 {
807 return rt_thread_startup(thread);
808 }
809
811 {
812 rt_err_t rt_err = -RT_EINVAL;
813
815 {
816 rt_err = rt_thread_detach(thread);
817 }
818 #ifdef RT_USING_HEAP
819 else
820 {
821 rt_err = rt_thread_delete(thread);
822 }
823 #endif /* RT_USING_HEAP */
824 rt_schedule();
825 return rt_err;
826 }
827
829 {
830 rt_uint8_t cpu;
831
832 cpu = (rt_uint8_t)(rt_size_t)arg;
833 return rt_sched_thread_bind_cpu(thread, cpu);
834 }
835
836 default:
837 break;
838 }
839
840 return RT_EOK;
841}
843
844#ifdef RT_USING_SMART
845#include <lwp_signal.h>
846#endif
847
848static void _thread_set_suspend_state(struct rt_thread *thread, int suspend_flag)
849{
851
852 RT_ASSERT(thread != RT_NULL);
853 switch (suspend_flag)
854 {
855 case RT_INTERRUPTIBLE:
857 break;
858 case RT_KILLABLE:
860 break;
863 break;
864 default:
865 RT_ASSERT(0);
866 break;
867 }
868 RT_SCHED_CTX(thread).stat = stat | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
869}
870
897rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int ipc_flags, int suspend_flag)
898{
901
902 /* parameter check */
903 RT_ASSERT(thread != RT_NULL);
905 RT_ASSERT(thread == rt_thread_self());
906
907 LOG_D("thread suspend: %s", thread->parent.name);
908
909 rt_sched_lock(&slvl);
910
913 {
914 LOG_D("thread suspend: thread disorder, 0x%2x", thread->stat);
915 rt_sched_unlock(slvl);
916 return -RT_ERROR;
917 }
918
919 if (stat == RT_THREAD_RUNNING)
920 {
921 /* not suspend running status thread on other core */
922 RT_ASSERT(thread == rt_thread_self());
923 }
924
925#ifdef RT_USING_SMART
926 if (thread->lwp)
927 {
928 rt_sched_unlock(slvl);
929
930 /* check pending signals for thread before suspend */
931 if (lwp_thread_signal_suspend_check(thread, suspend_flag) == 0)
932 {
933 /* not to suspend */
934 return -RT_EINTR;
935 }
936
937 rt_sched_lock(&slvl);
938 if (stat == RT_THREAD_READY)
939 {
941
942 if (stat != RT_THREAD_READY)
943 {
944 /* status updated while we check for signal */
945 rt_sched_unlock(slvl);
946 return -RT_ERROR;
947 }
948 }
949 }
950#endif
951
952 /* change thread stat */
953 rt_sched_remove_thread(thread);
954 _thread_set_suspend_state(thread, suspend_flag);
955
956 if (susp_list)
957 {
962 rt_susp_list_enqueue(susp_list, thread, ipc_flags);
963 }
964
965 /* stop thread timer anyway */
967
968 rt_sched_unlock(slvl);
969
970 RT_OBJECT_HOOK_CALL(rt_thread_suspend_hook, (thread));
971 return RT_EOK;
972}
974
992{
993 return rt_thread_suspend_to_list(thread, RT_NULL, 0, suspend_flag);
994}
996
1002
1012{
1015
1016 /* parameter check */
1017 RT_ASSERT(thread != RT_NULL);
1019
1020 LOG_D("thread resume: %s", thread->parent.name);
1021
1022 rt_sched_lock(&slvl);
1023
1024 error = rt_sched_thread_ready(thread);
1025
1026 if (!error)
1027 {
1029
1034 if (error == -RT_ESCHEDLOCKED)
1035 {
1036 error = RT_EOK;
1037 }
1038 }
1039 else
1040 {
1041 rt_sched_unlock(slvl);
1042 }
1043
1044 RT_OBJECT_HOOK_CALL(rt_thread_resume_hook, (thread));
1045
1046 return error;
1047}
1049
1050#ifdef RT_USING_SMART
1058rt_err_t rt_thread_wakeup(rt_thread_t thread)
1059{
1061 rt_err_t ret;
1062 rt_wakeup_func_t func = RT_NULL;
1063
1064 RT_ASSERT(thread != RT_NULL);
1066
1067 rt_sched_lock(&slvl);
1068 func = thread->wakeup_handle.func;
1069 thread->wakeup_handle.func = RT_NULL;
1070 rt_sched_unlock(slvl);
1071
1072 if (func)
1073 {
1074 ret = func(thread->wakeup_handle.user_data, thread);
1075 }
1076 else
1077 {
1078 ret = rt_thread_resume(thread);
1079 }
1080 return ret;
1081}
1082RTM_EXPORT(rt_thread_wakeup);
1083
1084void rt_thread_wakeup_set(struct rt_thread *thread, rt_wakeup_func_t func, void* user_data)
1085{
1087
1088 RT_ASSERT(thread != RT_NULL);
1090
1091 rt_sched_lock(&slvl);
1092 thread->wakeup_handle.func = func;
1093 thread->wakeup_handle.user_data = user_data;
1094 rt_sched_unlock(slvl);
1095}
1096RTM_EXPORT(rt_thread_wakeup_set);
1097#endif
1112
1114
1128{
1129 return (thread == RT_NULL) ? -RT_EINVAL : rt_object_get_name(&thread->parent, name, name_size);
1130}
1132
struct rt_cpu * rt_cpu_self(void)
This fucntion will return current cpu object.
rt_err_t rt_timer_detach(rt_timer_t timer)
This function will detach a timer from timer management.
rt_err_t rt_timer_control(rt_timer_t timer, int cmd, void *arg)
This function will get or set some options of the timer
rt_err_t rt_timer_start(rt_timer_t timer)
This function will start the timer
void rt_timer_init(rt_timer_t timer, const char *name, void(*timeout)(void *parameter), void *parameter, rt_tick_t time, rt_uint8_t flag)
This function will initialize a timer normally this function is used to initialize a static timer obj...
rt_tick_t rt_tick_get(void)
This function will return current tick from operating system startup.
Defined at clock.c:69
#define RT_TIMER_CTRL_SET_TIME
rt_tick_t rt_tick_from_millisecond(rt_int32_t ms)
This function will calculate the tick from millisecond.
#define RT_TIMER_FLAG_THREAD_TIMER
#define RT_TIMER_FLAG_ONE_SHOT
int stat(const char *file, struct stat *buf)
void rt_thread_defunct_enqueue(rt_thread_t thread)
Enqueue a thread to defunct queue.
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
This function will disable the local interrupt and then lock the spinlock, will lock the thread sched...
void rt_spin_lock_init(struct rt_spinlock *lock)
Initialize a static spinlock object.
rt_err_t rt_susp_list_enqueue(rt_list_t *susp_list, rt_thread_t thread, int ipc_flags)
Add a thread to the suspend list
Defined at ipc.c:241
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
This function will unlock the spinlock and then restore current cpu interrupt status,...
void rt_object_delete(rt_object_t object)
This function will delete an object and release object memory.
void rt_object_init(struct rt_object *object, enum rt_object_class_type type, const char *name)
This function will initialize an object and add it to object system management.
rt_bool_t rt_object_is_systemobject(rt_object_t object)
This function will judge the object is system object or not.
#define RT_OBJECT_HOOK_CALL(func, argv)
rt_err_t rt_object_get_name(rt_object_t object, char *name, rt_uint8_t name_size)
This function will return the name of the specified object container
rt_object_t rt_object_allocate(enum rt_object_class_type type, const char *name)
This function will allocate an object from object system.
rt_object_t rt_object_find(const char *name, rt_uint8_t type)
This function will find specified name object from object container.
#define RT_OBJECT_HOOKLIST_CALL(name, argv)
rt_uint8_t rt_object_get_type(rt_object_t object)
This function will return the type of object without RT_Object_Class_Static flag.
struct rt_object * rt_object_t
#define RT_OBJECT_HOOKLIST_DEFINE(name)
@ RT_Object_Class_Thread
@ RT_Object_Class_Mutex
rt_inline void rt_list_remove(rt_list_t *n)
remove node from list.
#define rt_list_entry(node, type, member)
get the struct for this entry
#define rt_list_for_each_safe(pos, n, head)
#define RT_ASSERT(EX)
rt_inline void rt_list_init(rt_list_t *l)
initialize a list
#define RT_DEBUG_SCHEDULER_AVAILABLE(need_check)
rt_err_t rt_thread_startup(rt_thread_t thread)
This function will start a thread and put it to system ready queue.
rt_err_t rt_thread_close(rt_thread_t thread)
This function will close a thread. The thread object will be removed from thread queue and detached/d...
rt_err_t rt_thread_get_name(rt_thread_t thread, char *name, rt_uint8_t name_size)
This function will return the name of the specified thread
#define RT_THREAD_CLOSE
void rt_exit_critical_safe(rt_base_t critical_level)
rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick)
This function will let current thread delay until (*tick + inc_tick).
#define RT_THREAD_CTRL_BIND_CPU
rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg)
This function will control thread behaviors according to control command.
#define RT_THREAD_READY
#define RT_THREAD_STAT_MASK
rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int ipc_flags, int suspend_flag)
This function will suspend the specified thread and change it to suspend state.
rt_err_t rt_thread_mdelay(rt_int32_t ms)
This function will let current thread delay for some milliseconds.
rt_err_t rt_thread_detach(rt_thread_t thread)
This function will detach a thread. The thread object will be removed from thread queue and detached/...
#define RT_THREAD_SUSPEND_INTERRUPTIBLE
rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag)
This function will suspend the specified thread and change it to suspend state.
rt_err_t rt_thread_yield(void)
This function will let current thread yield processor, and scheduler will choose the highest thread t...
rt_thread_t rt_thread_self(void)
This function will return self thread object.
rt_err_t rt_thread_delay(rt_tick_t tick)
This function will let current thread delay for some ticks.
rt_err_t rt_thread_delete(rt_thread_t thread)
This function will delete a thread. The thread object will be removed from thread queue and deleted f...
#define RT_THREAD_SUSPEND_UNINTERRUPTIBLE
#define RT_THREAD_SUSPEND_KILLABLE
#define RT_THREAD_RUNNING
rt_thread_t rt_thread_find(char *name)
This function will find the specified thread.
void rt_thread_suspend_sethook(void(*hook)(rt_thread_t thread))
rt_err_t rt_thread_resume(rt_thread_t thread)
This function will resume a thread and put it to system ready queue.
rt_err_t rt_thread_suspend(rt_thread_t thread)
rt_base_t rt_enter_critical(void)
This function will lock the thread scheduler.
rt_thread_t rt_thread_create(const char *name, void(*entry)(void *parameter), void *parameter, rt_uint32_t stack_size, rt_uint8_t priority, rt_uint32_t tick)
This function will create a thread object and allocate thread object memory. and stack.
rt_err_t rt_thread_init(struct rt_thread *thread, const char *name, void(*entry)(void *parameter), void *parameter, void *stack_start, rt_uint32_t stack_size, rt_uint8_t priority, rt_uint32_t tick)
This function will initialize a thread. It's used to initialize a static thread object.
#define RT_THREAD_CTRL_CHANGE_PRIORITY
struct rt_thread * rt_thread_t
#define RT_THREAD_CTRL_CLOSE
void rt_thread_resume_sethook(void(*hook)(rt_thread_t thread))
#define RT_THREAD_INIT
#define RT_THREAD_CTRL_STARTUP
void rt_schedule(void)
This function will perform one scheduling. It will select one thread with the highest priority level ...
rt_uint16_t rt_critical_level(void)
Get the scheduler lock level.
@ RT_KILLABLE
@ RT_INTERRUPTIBLE
@ RT_UNINTERRUPTIBLE
void rt_mutex_drop_thread(rt_mutex_t mutex, rt_thread_t thread)
drop a thread from the suspend list of mutex
Defined at ipc.c:1078
rt_err_t rt_mutex_release(rt_mutex_t mutex)
This function will release a mutex. If there is thread suspended on the mutex, the thread will be res...
Defined at ipc.c:1589
#define rt_atomic_store(ptr, v)
#define LOG_D(...)
#define RT_UNUSED(x)
#define RT_KERNEL_MALLOC(sz)
#define rt_hw_local_irq_disable
Defined at rthw.h:152
rt_uint8_t * rt_hw_stack_init(void *entry, void *parameter, rt_uint8_t *stack_addr, void *exit)
#define rt_hw_local_irq_enable
Defined at rthw.h:153
#define RTM_EXPORT(symbol)
Defined at rtm.h:33
#define RT_SCHED_CTX(thread)
#define RT_THREAD_LIST_NODE(thread)
rt_ubase_t rt_sched_lock_level_t
rt_int32_t rt_base_t
rt_base_t rt_err_t
unsigned char rt_uint8_t
#define RT_TRUE
rt_uint32_t rt_tick_t
rt_ubase_t rt_size_t
struct rt_list_node rt_list_t
unsigned int rt_uint32_t
#define RT_FALSE
rt_uint32_t rt_ubase_t
#define RT_NULL
signed int rt_int32_t
void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread)
rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread)
rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority)
Update priority of the target thread
rt_err_t rt_sched_thread_close(struct rt_thread *thread)
rt_err_t rt_sched_thread_ready(struct rt_thread *thread)
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
rt_err_t rt_sched_thread_yield(struct rt_thread *thread)
rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
struct rt_thread * current_thread
struct rt_object parent
struct rt_ipc_object parent
rt_list_t taken_list
rt_uint8_t hold
void * module_id
const char * name
void * parameter
struct rt_spinlock spinlock
void * stack_addr
rt_err_t error
struct rt_object parent
rt_object_t pending_object
rt_thread_cleanup_t cleanup
rt_uint8_t event_info
void * sp
rt_uint32_t event_set
void * entry
rt_uint32_t stack_size
rt_ubase_t user_data
RT_SCHED_THREAD_CTX struct rt_timer thread_timer
rt_list_t taken_object_list