RT-Thread RTOS 1.2.0
An open source embedded real-time operating system
ipc.c
Go to the documentation of this file.
1/*
2 * Copyright (c) 2006-2022, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2006-03-14 Bernard the first version
9 * 2006-04-25 Bernard implement semaphore
10 * 2006-05-03 Bernard add RT_IPC_DEBUG
11 * modify the type of IPC waiting time to rt_int32_t
12 * 2006-05-10 Bernard fix the semaphore take bug and add IPC object
13 * 2006-05-12 Bernard implement mailbox and message queue
14 * 2006-05-20 Bernard implement mutex
15 * 2006-05-23 Bernard implement fast event
16 * 2006-05-24 Bernard implement event
17 * 2006-06-03 Bernard fix the thread timer init bug
18 * 2006-06-05 Bernard fix the mutex release bug
19 * 2006-06-07 Bernard fix the message queue send bug
20 * 2006-08-04 Bernard add hook support
21 * 2009-05-21 Yi.qiu fix the sem release bug
22 * 2009-07-18 Bernard fix the event clear bug
23 * 2009-09-09 Bernard remove fast event and fix ipc release bug
24 * 2009-10-10 Bernard change semaphore and mutex value to unsigned value
25 * 2009-10-25 Bernard change the mb/mq receive timeout to 0 if the
26 * re-calculated delta tick is a negative number.
27 * 2009-12-16 Bernard fix the rt_ipc_object_suspend issue when IPC flag
28 * is RT_IPC_FLAG_PRIO
29 * 2010-01-20 mbbill remove rt_ipc_object_decrease function.
30 * 2010-04-20 Bernard move memcpy outside interrupt disable in mq
31 * 2010-10-26 yi.qiu add module support in rt_mp_delete and rt_mq_delete
32 * 2010-11-10 Bernard add IPC reset command implementation.
33 * 2011-12-18 Bernard add more parameter checking in message queue
34 * 2013-09-14 Grissiom add an option check in rt_event_recv
35 * 2018-10-02 Bernard add 64bit support for mailbox
36 * 2019-09-16 tyx add send wait support for message queue
37 * 2020-07-29 Meco Man fix thread->event_set/event_info when received an
38 * event without pending
39 * 2020-10-11 Meco Man add value overflow-check code
40 * 2021-01-03 Meco Man implement rt_mb_urgent()
41 * 2021-05-30 Meco Man implement rt_mutex_trytake()
42 * 2022-01-07 Gabriel Moving __on_rt_xxxxx_hook to ipc.c
43 * 2022-01-24 THEWON let rt_mutex_take return thread->error when using signal
44 * 2022-04-08 Stanley Correct descriptions
45 * 2022-10-15 Bernard add nested mutex feature
46 * 2022-10-16 Bernard add prioceiling feature in mutex
47 * 2023-04-16 Xin-zheqi redesign queue recv and send functions to return real message size
48 * 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
49 */
50
51#include <rtthread.h>
52#include <rthw.h>
53
54#define DBG_TAG "kernel.ipc"
55#define DBG_LVL DBG_INFO
56#include <rtdbg.h>
57
58#define GET_MESSAGEBYTE_ADDR(msg) ((struct rt_mq_message *) msg + 1)
59#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
60extern void (*rt_object_trytake_hook)(struct rt_object *object);
61extern void (*rt_object_take_hook)(struct rt_object *object);
62extern void (*rt_object_put_hook)(struct rt_object *object);
63#endif /* RT_USING_HOOK */
64
69
82rt_inline rt_err_t _ipc_object_init(struct rt_ipc_object *ipc)
83{
84 /* initialize ipc object */
85 rt_list_init(&(ipc->suspend_thread));
86
87 return RT_EOK;
88}
89
90
105struct rt_thread *rt_susp_list_dequeue(rt_list_t *susp_list, rt_err_t thread_error)
106{
107 rt_sched_lock_level_t slvl;
108 rt_thread_t thread;
109 rt_err_t error;
110
112 RT_ASSERT(susp_list != RT_NULL);
113
114 rt_sched_lock(&slvl);
115 if (!rt_list_isempty(susp_list))
116 {
117 thread = RT_THREAD_LIST_NODE_ENTRY(susp_list->next);
118 error = rt_thread_resume(thread);
119
120 if (error)
121 {
122 LOG_D("%s [error:%d] failed to resume thread:%p from suspended list",
123 __func__, error, thread);
124
125 thread = RT_NULL;
126 }
127 else
128 {
129 /* thread error should not be a negative value */
130 if (thread_error >= 0)
131 {
132 /* set the error code to notify the resumed thread */
133 thread->error = thread_error;
134 }
135 }
136 }
137 else
138 {
139 thread = RT_NULL;
140 }
141 rt_sched_unlock(slvl);
142
143 if (thread) LOG_D("resume thread:%s", thread->parent.name);
144
145 return thread;
146}
147
148
165rt_err_t rt_susp_list_resume_all(rt_list_t *susp_list, rt_err_t thread_error)
166{
167 struct rt_thread *thread;
168
170
171 /* wakeup all suspended threads */
172 thread = rt_susp_list_dequeue(susp_list, thread_error);
173 while (thread)
174 {
175 /*
176 * resume the NEXT thread;
177 * rt_thread_resume() removes the resumed thread from
178 * the suspended list
179 */
180 thread = rt_susp_list_dequeue(susp_list, thread_error);
181 }
182
183 return RT_EOK;
184}
185
204rt_err_t rt_susp_list_resume_all_irq(rt_list_t *susp_list,
205 rt_err_t thread_error,
206 struct rt_spinlock *lock)
207{
208 struct rt_thread *thread;
209 rt_base_t level;
210
212
213 do
214 {
215 level = rt_spin_lock_irqsave(lock);
216
217 /*
218 * resume the NEXT thread;
219 * rt_thread_resume() removes the resumed thread from
220 * the suspended list
221 */
222 thread = rt_susp_list_dequeue(susp_list, thread_error);
223
224 rt_spin_unlock_irqrestore(lock, level);
225 }
226 while (thread);
227
228 return RT_EOK;
229}
230
241rt_err_t rt_susp_list_enqueue(rt_list_t *susp_list, rt_thread_t thread, int ipc_flags)
242{
244
245 switch (ipc_flags)
246 {
247 case RT_IPC_FLAG_FIFO:
248 rt_list_insert_before(susp_list, &RT_THREAD_LIST_NODE(thread));
249 break; /* RT_IPC_FLAG_FIFO */
250
251 case RT_IPC_FLAG_PRIO:
252 {
253 struct rt_list_node *n;
254 struct rt_thread *sthread;
255
256 /* find a suitable position */
257 for (n = susp_list->next; n != susp_list; n = n->next)
258 {
259 sthread = RT_THREAD_LIST_NODE_ENTRY(n);
260
261 /* found one with a lower priority */
262 if (rt_sched_thread_get_curr_prio(thread) < rt_sched_thread_get_curr_prio(sthread))
263 {
264 /* insert this thread before the sthread */
265 rt_list_insert_before(&RT_THREAD_LIST_NODE(sthread), &RT_THREAD_LIST_NODE(thread));
266 break;
267 }
268 }
269
270 /*
271 * no suitable position was found,
272 * append to the end of the suspend_thread list
273 */
274 if (n == susp_list)
275 rt_list_insert_before(susp_list, &RT_THREAD_LIST_NODE(thread));
276 }
277 break;/* RT_IPC_FLAG_PRIO */
278
279 default:
280 RT_ASSERT(0);
281 break;
282 }
283
284 return RT_EOK;
285}
286
290void rt_susp_list_print(rt_list_t *list)
291{
292#ifdef RT_USING_CONSOLE
293 rt_sched_lock_level_t slvl;
294 struct rt_thread *thread;
295 struct rt_list_node *node;
296
297 rt_sched_lock(&slvl);
298
299 for (node = list->next; node != list; node = node->next)
300 {
301 thread = RT_THREAD_LIST_NODE_ENTRY(node);
302 rt_kprintf("%.*s", RT_NAME_MAX, thread->parent.name);
303
304 if (node->next != list)
305 rt_kprintf("/");
306 }
307
308 rt_sched_unlock(slvl);
309#else
310 (void)list;
311#endif
312}
313
314
315#ifdef RT_USING_SEMAPHORE
320
321static void _sem_object_init(rt_sem_t sem,
322 rt_uint16_t value,
323 rt_uint8_t flag,
324 rt_uint16_t max_value)
325{
326 /* initialize ipc object */
327 _ipc_object_init(&(sem->parent));
328
329 sem->max_value = max_value;
330 /* set initial value */
331 sem->value = value;
332
333 /* set parent */
334 sem->parent.parent.flag = flag;
335 rt_spin_lock_init(&(sem->spinlock));
336}
337
376rt_err_t rt_sem_init(rt_sem_t sem,
377 const char *name,
378 rt_uint32_t value,
379 rt_uint8_t flag)
380{
381 RT_ASSERT(sem != RT_NULL);
382 RT_ASSERT(value < 0x10000U);
383 RT_ASSERT((flag == RT_IPC_FLAG_FIFO) || (flag == RT_IPC_FLAG_PRIO));
384
385 /* initialize object */
386 rt_object_init(&(sem->parent.parent), RT_Object_Class_Semaphore, name);
387
388 _sem_object_init(sem, value, flag, RT_SEM_VALUE_MAX);
389
390 return RT_EOK;
391}
393
394
413rt_err_t rt_sem_detach(rt_sem_t sem)
414{
415 rt_base_t level;
416
417 /* parameter check */
418 RT_ASSERT(sem != RT_NULL);
419 RT_ASSERT(rt_object_get_type(&sem->parent.parent) == RT_Object_Class_Semaphore);
420 RT_ASSERT(rt_object_is_systemobject(&sem->parent.parent));
421
422 level = rt_spin_lock_irqsave(&(sem->spinlock));
423 /* wakeup all suspended threads */
424 rt_susp_list_resume_all(&(sem->parent.suspend_thread), RT_ERROR);
425 rt_spin_unlock_irqrestore(&(sem->spinlock), level);
426
427 /* detach semaphore object */
428 rt_object_detach(&(sem->parent.parent));
429
430 return RT_EOK;
431}
433
434#ifdef RT_USING_HEAP
467rt_sem_t rt_sem_create(const char *name, rt_uint32_t value, rt_uint8_t flag)
468{
469 rt_sem_t sem;
470
471 RT_ASSERT(value < 0x10000U);
472 RT_ASSERT((flag == RT_IPC_FLAG_FIFO) || (flag == RT_IPC_FLAG_PRIO));
473
474 RT_DEBUG_NOT_IN_INTERRUPT;
475
476 /* allocate object */
477 sem = (rt_sem_t)rt_object_allocate(RT_Object_Class_Semaphore, name);
478 if (sem == RT_NULL)
479 return sem;
480
481 _sem_object_init(sem, value, flag, RT_SEM_VALUE_MAX);
482
483 return sem;
484}
486
487
506rt_err_t rt_sem_delete(rt_sem_t sem)
507{
508 rt_ubase_t level;
509
510 /* parameter check */
511 RT_ASSERT(sem != RT_NULL);
512 RT_ASSERT(rt_object_get_type(&sem->parent.parent) == RT_Object_Class_Semaphore);
513 RT_ASSERT(rt_object_is_systemobject(&sem->parent.parent) == RT_FALSE);
514
515 RT_DEBUG_NOT_IN_INTERRUPT;
516
517 level = rt_spin_lock_irqsave(&(sem->spinlock));
518 /* wakeup all suspended threads */
519 rt_susp_list_resume_all(&(sem->parent.suspend_thread), RT_ERROR);
520 rt_spin_unlock_irqrestore(&(sem->spinlock), level);
521
522 /* delete semaphore object */
523 rt_object_delete(&(sem->parent.parent));
524
525 return RT_EOK;
526}
528#endif /* RT_USING_HEAP */
529
530
558static rt_err_t _rt_sem_take(rt_sem_t sem, rt_int32_t timeout, int suspend_flag)
559{
560 rt_base_t level;
561 struct rt_thread *thread;
562 rt_err_t ret;
563
564 /* parameter check */
565 RT_ASSERT(sem != RT_NULL);
566 RT_ASSERT(rt_object_get_type(&sem->parent.parent) == RT_Object_Class_Semaphore);
567
568 RT_OBJECT_HOOK_CALL(rt_object_trytake_hook, (&(sem->parent.parent)));
569
570 /* current context checking */
571 RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);
572
573 level = rt_spin_lock_irqsave(&(sem->spinlock));
574
575 LOG_D("thread %s take sem:%s, which value is: %d",
576 rt_thread_self()->parent.name,
577 sem->parent.parent.name,
578 sem->value);
579
580 if (sem->value > 0)
581 {
582 /* semaphore is available */
583 sem->value --;
584 rt_spin_unlock_irqrestore(&(sem->spinlock), level);
585 }
586 else
587 {
588 /* no waiting, return with timeout */
589 if (timeout == 0)
590 {
591 rt_spin_unlock_irqrestore(&(sem->spinlock), level);
592 return -RT_ETIMEOUT;
593 }
594 else
595 {
596 /* semaphore is unavailable, push to suspend list */
597 /* get current thread */
598 thread = rt_thread_self();
599
600 /* reset thread error number */
601 thread->error = RT_EINTR;
602
603 LOG_D("sem take: suspend thread - %s", thread->parent.name);
604
605 /* suspend thread */
606 ret = rt_thread_suspend_to_list(thread, &(sem->parent.suspend_thread),
607 sem->parent.parent.flag, suspend_flag);
608 if (ret != RT_EOK)
609 {
610 rt_spin_unlock_irqrestore(&(sem->spinlock), level);
611 return ret;
612 }
613
614 /* has waiting time, start thread timer */
615 if (timeout > 0)
616 {
617 LOG_D("set thread:%s to timer list", thread->parent.name);
618
619 /* reset the timeout of thread timer and start it */
620 rt_timer_control(&(thread->thread_timer),
621 RT_TIMER_CTRL_SET_TIME,
622 &timeout);
623 rt_timer_start(&(thread->thread_timer));
624 }
625
626 /* enable interrupt */
627 rt_spin_unlock_irqrestore(&(sem->spinlock), level);
628
629 /* do schedule */
630 rt_schedule();
631
632 if (thread->error != RT_EOK)
633 {
634 return thread->error > 0 ? -thread->error : thread->error;
635 }
636 }
637 }
638
639 RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(sem->parent.parent)));
640
641 return RT_EOK;
642}
643
644rt_err_t rt_sem_take(rt_sem_t sem, rt_int32_t time)
645{
646 return _rt_sem_take(sem, time, RT_UNINTERRUPTIBLE);
647}
649
650rt_err_t rt_sem_take_interruptible(rt_sem_t sem, rt_int32_t time)
651{
652 return _rt_sem_take(sem, time, RT_INTERRUPTIBLE);
653}
655
656rt_err_t rt_sem_take_killable(rt_sem_t sem, rt_int32_t time)
657{
658 return _rt_sem_take(sem, time, RT_KILLABLE);
659}
661
678rt_err_t rt_sem_trytake(rt_sem_t sem)
679{
680 return rt_sem_take(sem, RT_WAITING_NO);
681}
682
695rt_err_t rt_sem_release(rt_sem_t sem)
696{
697 rt_base_t level;
698 rt_bool_t need_schedule;
699
700 /* parameter check */
701 RT_ASSERT(sem != RT_NULL);
702 RT_ASSERT(rt_object_get_type(&sem->parent.parent) == RT_Object_Class_Semaphore);
703
704 RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(sem->parent.parent)));
705
706 need_schedule = RT_FALSE;
707
708 level = rt_spin_lock_irqsave(&(sem->spinlock));
709
710 LOG_D("thread %s releases sem:%s, which value is: %d",
711 rt_thread_self()->parent.name,
712 sem->parent.parent.name,
713 sem->value);
714
715 if (!rt_list_isempty(&sem->parent.suspend_thread))
716 {
717 /* resume the suspended thread */
718 rt_susp_list_dequeue(&(sem->parent.suspend_thread), RT_EOK);
719 need_schedule = RT_TRUE;
720 }
721 else
722 {
723 if(sem->value < sem->max_value)
724 {
725 sem->value ++; /* increase value */
726 }
727 else
728 {
729 rt_spin_unlock_irqrestore(&(sem->spinlock), level);
730 return -RT_EFULL; /* value overflowed */
731 }
732 }
733
734 rt_spin_unlock_irqrestore(&(sem->spinlock), level);
735
736 /* resume a thread, re-schedule */
737 if (need_schedule == RT_TRUE)
738 rt_schedule();
739
740 return RT_EOK;
741}
743
744
759rt_err_t rt_sem_control(rt_sem_t sem, int cmd, void *arg)
760{
761 rt_base_t level;
762
763 /* parameter check */
764 RT_ASSERT(sem != RT_NULL);
765 RT_ASSERT(rt_object_get_type(&sem->parent.parent) == RT_Object_Class_Semaphore);
766
767 if (cmd == RT_IPC_CMD_RESET)
768 {
769 rt_ubase_t value;
770
771 /* get value */
772 value = (rt_uintptr_t)arg;
773 level = rt_spin_lock_irqsave(&(sem->spinlock));
774
775 /* resume all waiting thread */
776 rt_susp_list_resume_all(&sem->parent.suspend_thread, RT_ERROR);
777
778 /* set new value */
779 sem->value = (rt_uint16_t)value;
780 rt_spin_unlock_irqrestore(&(sem->spinlock), level);
781 rt_schedule();
782
783 return RT_EOK;
784 }
785 else if (cmd == RT_IPC_CMD_SET_VLIMIT)
786 {
787 rt_ubase_t max_value;
788 rt_bool_t need_schedule = RT_FALSE;
789
790 max_value = (rt_uint16_t)((rt_uintptr_t)arg);
791 if (max_value > RT_SEM_VALUE_MAX || max_value < 1)
792 {
793 return -RT_EINVAL;
794 }
795
796 level = rt_spin_lock_irqsave(&(sem->spinlock));
797 if (max_value < sem->value)
798 {
799 if (!rt_list_isempty(&sem->parent.suspend_thread))
800 {
801 /* resume all waiting thread */
802 rt_susp_list_resume_all(&sem->parent.suspend_thread, RT_ERROR);
803 need_schedule = RT_TRUE;
804 }
805 }
806 /* set new value */
807 sem->max_value = max_value;
808 rt_spin_unlock_irqrestore(&(sem->spinlock), level);
809
810 if (need_schedule)
811 {
812 rt_schedule();
813 }
814
815 return RT_EOK;
816 }
817
818 return -RT_ERROR;
819}
821
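
/*
 * Illustrative usage sketch (editorial addition, not part of ipc.c): a
 * counting semaphore signaling a worker thread. Assumes RT_USING_HEAP for
 * rt_sem_create()/rt_thread_create(); names, stack size and priorities are
 * hypothetical.
 */
static rt_sem_t demo_sem;

static void demo_worker_entry(void *parameter)
{
    while (1)
    {
        /* block until the producer releases the semaphore */
        if (rt_sem_take(demo_sem, RT_WAITING_FOREVER) == RT_EOK)
        {
            /* ... handle one unit of work ... */
        }
    }
}

static void demo_sem_usage(void)
{
    rt_thread_t tid;

    /* initial value 0: rt_sem_take() blocks until rt_sem_release() */
    demo_sem = rt_sem_create("dsem", 0, RT_IPC_FLAG_PRIO);
    if (demo_sem == RT_NULL)
        return;

    tid = rt_thread_create("dwork", demo_worker_entry, RT_NULL, 1024, 20, 10);
    if (tid != RT_NULL)
        rt_thread_startup(tid);

    rt_sem_release(demo_sem); /* wake the worker once */
}
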
823#endif /* RT_USING_SEMAPHORE */
824
825#ifdef RT_USING_MUTEX
826/* iterate over each suspended thread to update highest priority in pending threads */
827rt_inline rt_uint8_t _mutex_update_priority(struct rt_mutex *mutex)
828{
829 struct rt_thread *thread;
830
831 if (!rt_list_isempty(&mutex->parent.suspend_thread))
832 {
833 thread = RT_THREAD_LIST_NODE_ENTRY(mutex->parent.suspend_thread.next);
834 mutex->priority = rt_sched_thread_get_curr_prio(thread);
835 }
836 else
837 {
838 mutex->priority = 0xff;
839 }
840
841 return mutex->priority;
842}
843
844/* get highest priority inside its taken object and its init priority */
845rt_inline rt_uint8_t _thread_get_mutex_priority(struct rt_thread* thread)
846{
847 rt_list_t *node = RT_NULL;
848 struct rt_mutex *mutex = RT_NULL;
849 rt_uint8_t priority = RT_SCHED_PRIV(thread).init_priority;
850
851 rt_list_for_each(node, &(thread->taken_object_list))
852 {
853 mutex = rt_list_entry(node, struct rt_mutex, taken_list);
854 rt_uint8_t mutex_prio = mutex->priority;
855 /* prio at least be priority ceiling */
856 mutex_prio = mutex_prio < mutex->ceiling_priority ? mutex_prio : mutex->ceiling_priority;
857
858 if (priority > mutex_prio)
859 {
860 priority = mutex_prio;
861 }
862 }
863
864 return priority;
865}
866
867/* update priority of target thread and the thread suspended it if any */
868rt_inline void _thread_update_priority(struct rt_thread *thread, rt_uint8_t priority, int suspend_flag)
869{
870 rt_err_t ret = -RT_ERROR;
871 struct rt_object* pending_obj = RT_NULL;
872
873 LOG_D("thread:%s priority -> %d", thread->parent.name, priority);
874
875 /* change priority of the thread */
876 ret = rt_sched_thread_change_priority(thread, priority);
877
878 while ((ret == RT_EOK) && rt_sched_thread_is_suspended(thread))
879 {
880 /* whether change the priority of taken mutex */
881 pending_obj = thread->pending_object;
882
883 if (pending_obj && rt_object_get_type(pending_obj) == RT_Object_Class_Mutex)
884 {
885 rt_uint8_t mutex_priority = 0xff;
886 struct rt_mutex* pending_mutex = (struct rt_mutex *)pending_obj;
887
888 /* re-insert thread to suspended thread list to resort priority list */
889 rt_list_remove(&RT_THREAD_LIST_NODE(thread));
890
891 ret = rt_susp_list_enqueue(
892 &(pending_mutex->parent.suspend_thread), thread,
893 pending_mutex->parent.parent.flag);
894 if (ret == RT_EOK)
895 {
896 /* update priority */
897 _mutex_update_priority(pending_mutex);
898 /* change the priority of mutex owner thread */
899 LOG_D("mutex: %s priority -> %d", pending_mutex->parent.parent.name,
900 pending_mutex->priority);
901
902 mutex_priority = _thread_get_mutex_priority(pending_mutex->owner);
903 if (mutex_priority != rt_sched_thread_get_curr_prio(pending_mutex->owner))
904 {
905 thread = pending_mutex->owner;
906
907 ret = rt_sched_thread_change_priority(thread, mutex_priority);
908 }
909 else
910 {
911 ret = -RT_ERROR;
912 }
913 }
914 }
915 else
916 {
917 ret = -RT_ERROR;
918 }
919 }
920}
921
922static rt_bool_t _check_and_update_prio(rt_thread_t thread, rt_mutex_t mutex)
923{
925 rt_bool_t do_sched = RT_FALSE;
926
927 if ((mutex->ceiling_priority != 0xFF) || (rt_sched_thread_get_curr_prio(thread) == mutex->priority))
928 {
929 rt_uint8_t priority = 0xff;
930
931 /* get the highest priority in the taken list of thread */
932 priority = _thread_get_mutex_priority(thread);
933
934 rt_sched_thread_change_priority(thread, priority);
935
940 do_sched = RT_TRUE;
941 }
942 return do_sched;
943}
944
945static void _mutex_before_delete_detach(rt_mutex_t mutex)
946{
947 rt_sched_lock_level_t slvl;
948 rt_bool_t need_schedule = RT_FALSE;
949
950 rt_spin_lock(&(mutex->spinlock));
951 /* wakeup all suspended threads */
952 rt_susp_list_resume_all(&(mutex->parent.suspend_thread), RT_ERROR);
953
954 rt_sched_lock(&slvl);
955
956 /* remove mutex from thread's taken list */
957 rt_list_remove(&mutex->taken_list);
958
959 /* whether change the thread priority */
960 if (mutex->owner)
961 {
962 need_schedule = _check_and_update_prio(mutex->owner, mutex);
963 }
964
965 if (need_schedule)
966 {
967 rt_sched_unlock_n_resched(slvl);
968 }
969 else
970 {
971 rt_sched_unlock(slvl);
972 }
973
974 /* unlock and do necessary reschedule if required */
975 rt_spin_unlock(&(mutex->spinlock));
976}
977
982
1007rt_err_t rt_mutex_init(rt_mutex_t mutex, const char *name, rt_uint8_t flag)
1008{
1009 /* flag parameter has been obsoleted */
1010 RT_UNUSED(flag);
1011
1012 /* parameter check */
1013 RT_ASSERT(mutex != RT_NULL);
1014
1015 /* initialize object */
1016 rt_object_init(&(mutex->parent.parent), RT_Object_Class_Mutex, name);
1017
1018 /* initialize ipc object */
1019 _ipc_object_init(&(mutex->parent));
1020
1021 mutex->owner = RT_NULL;
1022 mutex->priority = 0xFF;
1023 mutex->hold = 0;
1024 mutex->ceiling_priority = 0xFF;
1025 rt_list_init(&(mutex->taken_list));
1026
1027 /* flag can only be RT_IPC_FLAG_PRIO. RT_IPC_FLAG_FIFO cannot solve the unbounded priority inversion problem */
1028 mutex->parent.parent.flag = RT_IPC_FLAG_PRIO;
1029 rt_spin_lock_init(&(mutex->spinlock));
1030
1031 return RT_EOK;
1032}
1034
1035
1054rt_err_t rt_mutex_detach(rt_mutex_t mutex)
1055{
1056 /* parameter check */
1057 RT_ASSERT(mutex != RT_NULL);
1058 RT_ASSERT(rt_object_get_type(&mutex->parent.parent) == RT_Object_Class_Mutex);
1059 RT_ASSERT(rt_object_is_systemobject(&mutex->parent.parent));
1060
1061 _mutex_before_delete_detach(mutex);
1062
1063 /* detach mutex object */
1064 rt_object_detach(&(mutex->parent.parent));
1065
1066 return RT_EOK;
1067}
1069
1070/* drop a thread from the suspend list of mutex */
1071
1078void rt_mutex_drop_thread(rt_mutex_t mutex, rt_thread_t thread)
1079{
1080 rt_uint8_t priority;
1081 rt_bool_t need_update = RT_FALSE;
1082 rt_sched_lock_level_t slvl;
1083
1084 /* parameter check */
1086 RT_ASSERT(mutex != RT_NULL);
1087 RT_ASSERT(thread != RT_NULL);
1088
1089 rt_spin_lock(&(mutex->spinlock));
1090
1091 RT_ASSERT(thread->pending_object == &mutex->parent.parent);
1092
1093 rt_sched_lock(&slvl);
1094
1095 /* detach from suspended list */
1096 rt_list_remove(&RT_THREAD_LIST_NODE(thread));
1097
1105 if (mutex->owner && rt_sched_thread_get_curr_prio(mutex->owner) ==
1106 rt_sched_thread_get_curr_prio(thread))
1107 {
1108 need_update = RT_TRUE;
1109 }
1110
1111 /* update the priority of mutex */
1112 if (!rt_list_isempty(&mutex->parent.suspend_thread))
1113 {
1114 /* more thread suspended in the list */
1115 struct rt_thread *th;
1116
1118 /* update the priority of mutex */
1120 }
1121 else
1122 {
1123 /* set mutex priority to maximal priority */
1124 mutex->priority = 0xff;
1125 }
1126
1127 /* try to change the priority of mutex owner thread */
1128 if (need_update)
1129 {
1130 /* get the maximal priority of mutex in thread */
1131 priority = _thread_get_mutex_priority(mutex->owner);
1132 if (priority != rt_sched_thread_get_curr_prio(mutex->owner))
1133 {
1134 _thread_update_priority(mutex->owner, priority, RT_UNINTERRUPTIBLE);
1135 }
1136 }
1137
1138 rt_sched_unlock(slvl);
1139 rt_spin_unlock(&(mutex->spinlock));
1140}
1141
1142
1151rt_uint8_t rt_mutex_setprioceiling(rt_mutex_t mutex, rt_uint8_t priority)
1152{
1153 rt_uint8_t ret_priority = 0xFF;
1154 rt_uint8_t highest_prio;
1156
1158
1159 if ((mutex) && (priority < RT_THREAD_PRIORITY_MAX))
1160 {
1161 /* critical section here if multiple updates to one mutex happen */
1162 rt_spin_lock(&(mutex->spinlock));
1163 ret_priority = mutex->ceiling_priority;
1164 mutex->ceiling_priority = priority;
1165 if (mutex->owner)
1166 {
1167 rt_sched_lock(&slvl);
1168 highest_prio = _thread_get_mutex_priority(mutex->owner);
1169 if (highest_prio != rt_sched_thread_get_curr_prio(mutex->owner))
1170 {
1171 _thread_update_priority(mutex->owner, highest_prio, RT_UNINTERRUPTIBLE);
1172 }
1173 rt_sched_unlock(slvl);
1174 }
1175 rt_spin_unlock(&(mutex->spinlock));
1176 }
1177 else
1178 {
1179 rt_set_errno(-RT_EINVAL);
1180 }
1181
1182 return ret_priority;
1183}
1185
1186
1194rt_uint8_t rt_mutex_getprioceiling(rt_mutex_t mutex)
1195{
1196 rt_uint8_t prio = 0xFF;
1197
1198 /* parameter check */
1200 RT_ASSERT(mutex != RT_NULL);
1201
1202 if (mutex)
1203 {
1204 rt_spin_lock(&(mutex->spinlock));
1205 prio = mutex->ceiling_priority;
1206 rt_spin_unlock(&(mutex->spinlock));
1207 }
1208
1209 return prio;
1210}
1212
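
/*
 * Illustrative sketch (editorial addition, not part of ipc.c): using the
 * priority-ceiling protocol implemented above. The ceiling value 5 and the
 * function name are hypothetical.
 */
static void demo_prioceiling_usage(rt_mutex_t lock)
{
    rt_uint8_t old_ceiling;

    /* any thread holding 'lock' temporarily runs at priority 5 or better */
    old_ceiling = rt_mutex_setprioceiling(lock, 5);
    (void)old_ceiling;               /* previous ceiling, 0xFF if unset */

    /* read it back; should now be 5 */
    RT_ASSERT(rt_mutex_getprioceiling(lock) == 5);
}
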
1213
1214#ifdef RT_USING_HEAP
1233rt_mutex_t rt_mutex_create(const char *name, rt_uint8_t flag)
1234{
1235 struct rt_mutex *mutex;
1236
1237 /* flag parameter has been obsoleted */
1238 RT_UNUSED(flag);
1239
1240 RT_DEBUG_NOT_IN_INTERRUPT;
1241
1242 /* allocate object */
1243 mutex = (rt_mutex_t)rt_object_allocate(RT_Object_Class_Mutex, name);
1244 if (mutex == RT_NULL)
1245 return mutex;
1246
1247 /* initialize ipc object */
1248 _ipc_object_init(&(mutex->parent));
1249
1250 mutex->owner = RT_NULL;
1251 mutex->priority = 0xFF;
1252 mutex->hold = 0;
1253 mutex->ceiling_priority = 0xFF;
1254 rt_list_init(&(mutex->taken_list));
1255
1256 /* flag can only be RT_IPC_FLAG_PRIO. RT_IPC_FLAG_FIFO cannot solve the unbounded priority inversion problem */
1257 mutex->parent.parent.flag = RT_IPC_FLAG_PRIO;
1258 rt_spin_lock_init(&(mutex->spinlock));
1259
1260 return mutex;
1261}
1263
1264
1283rt_err_t rt_mutex_delete(rt_mutex_t mutex)
1284{
1285 /* parameter check */
1286 RT_ASSERT(mutex != RT_NULL);
1287 RT_ASSERT(rt_object_get_type(&mutex->parent.parent) == RT_Object_Class_Mutex);
1288 RT_ASSERT(rt_object_is_systemobject(&mutex->parent.parent) == RT_FALSE);
1289
1290 RT_DEBUG_NOT_IN_INTERRUPT;
1291
1292 _mutex_before_delete_detach(mutex);
1293
1294 /* delete mutex object */
1295 rt_object_delete(&(mutex->parent.parent));
1296
1297 return RT_EOK;
1298}
1300#endif /* RT_USING_HEAP */
1301
1302
1326static rt_err_t _rt_mutex_take(rt_mutex_t mutex, rt_int32_t timeout, int suspend_flag)
1327{
1328 struct rt_thread *thread;
1329 rt_err_t ret;
1330
1331 /* this function must not be used in interrupt even if time = 0 */
1332 /* current context checking */
1333 RT_DEBUG_IN_THREAD_CONTEXT;
1334
1335 /* parameter check */
1336 RT_ASSERT(mutex != RT_NULL);
1337 RT_ASSERT(rt_object_get_type(&mutex->parent.parent) == RT_Object_Class_Mutex);
1338
1339 /* get current thread */
1340 thread = rt_thread_self();
1341
1342 rt_spin_lock(&(mutex->spinlock));
1343
1344 RT_OBJECT_HOOK_CALL(rt_object_trytake_hook, (&(mutex->parent.parent)));
1345
1346 LOG_D("mutex_take: current thread %s, hold: %d",
1347 thread->parent.name, mutex->hold);
1348
1349 /* reset thread error */
1350 thread->error = RT_EOK;
1351
1352 if (mutex->owner == thread)
1353 {
1354 if (mutex->hold < RT_MUTEX_HOLD_MAX)
1355 {
1356 /* it's the same thread */
1357 mutex->hold ++;
1358 }
1359 else
1360 {
1361 rt_spin_unlock(&(mutex->spinlock));
1362 return -RT_EFULL; /* value overflowed */
1363 }
1364 }
1365 else
1366 {
1367 /* whether the mutex has owner thread. */
1368 if (mutex->owner == RT_NULL)
1369 {
1370 /* set mutex owner and original priority */
1371 mutex->owner = thread;
1372 mutex->priority = 0xff;
1373 mutex->hold = 1;
1374
1375 if (mutex->ceiling_priority != 0xFF)
1376 {
1377 /* set the priority of thread to the ceiling priority */
1378 if (mutex->ceiling_priority != rt_sched_thread_get_curr_prio(mutex->owner))
1379 _thread_update_priority(mutex->owner, mutex->ceiling_priority, suspend_flag);
1380 }
1381
1382 /* insert mutex to thread's taken object list */
1383 rt_list_insert_after(&thread->taken_object_list, &mutex->taken_list);
1384 }
1385 else
1386 {
1387 /* no waiting, return with timeout */
1388 if (timeout == 0)
1389 {
1390 /* set error as timeout */
1391 thread->error = RT_ETIMEOUT;
1392
1393 rt_spin_unlock(&(mutex->spinlock));
1394 return -RT_ETIMEOUT;
1395 }
1396 else
1397 {
1398 rt_sched_lock_level_t slvl;
1399 rt_uint8_t priority;
1400
1401 /* mutex is unavailable, push to suspend list */
1402 LOG_D("mutex_take: suspend thread: %s",
1403 thread->parent.name);
1404
1405 /* suspend current thread */
1406 ret = rt_thread_suspend_to_list(thread, &(mutex->parent.suspend_thread),
1407 mutex->parent.parent.flag, suspend_flag);
1408 if (ret != RT_EOK)
1409 {
1410 rt_spin_unlock(&(mutex->spinlock));
1411 return ret;
1412 }
1413
1414 /* set pending object in thread to this mutex */
1415 thread->pending_object = &(mutex->parent.parent);
1416
1417 rt_sched_lock(&slvl);
1418
1419 priority = rt_sched_thread_get_curr_prio(thread);
1420
1421 /* update the priority level of mutex */
1422 if (priority < mutex->priority)
1423 {
1424 mutex->priority = priority;
1425 if (mutex->priority < rt_sched_thread_get_curr_prio(mutex->owner))
1426 {
1427 _thread_update_priority(mutex->owner, priority, RT_UNINTERRUPTIBLE); /* TODO */
1428 }
1429 }
1430
1431 rt_sched_unlock(slvl);
1432
1433 /* has waiting time, start thread timer */
1434 if (timeout > 0)
1435 {
1436 LOG_D("mutex_take: start the timer of thread:%s",
1437 thread->parent.name);
1438
1439 /* reset the timeout of thread timer and start it */
1440 rt_timer_control(&(thread->thread_timer),
1441 RT_TIMER_CTRL_SET_TIME,
1442 &timeout);
1443 rt_timer_start(&(thread->thread_timer));
1444 }
1445
1446 rt_spin_unlock(&(mutex->spinlock));
1447
1448 /* do schedule */
1449 rt_schedule();
1450
1451 rt_spin_lock(&(mutex->spinlock));
1452
1453 if (mutex->owner == thread)
1454 {
1459 RT_ASSERT(thread->error == RT_EOK);
1460 }
1461 else
1462 {
1463 /* the mutex has not been taken and the thread has detached from the pending list */
1464
1465 rt_bool_t need_update = RT_FALSE;
1466 RT_ASSERT(mutex->owner != thread);
1467
1468 /* get value first before calling to other APIs */
1469 ret = thread->error;
1470
1471 /* unexpected resume */
1472 if (ret == RT_EOK)
1473 {
1474 ret = -RT_EINTR;
1475 }
1476
1477 rt_sched_lock(&slvl);
1478
1485 if (mutex->owner && rt_sched_thread_get_curr_prio(mutex->owner) ==
1486 rt_sched_thread_get_curr_prio(thread))
1487 need_update = RT_TRUE;
1488
1489 /* update the priority of mutex */
1490 if (!rt_list_isempty(&mutex->parent.suspend_thread))
1491 {
1492 /* more thread suspended in the list */
1493 struct rt_thread *th;
1494
1495 th = RT_THREAD_LIST_NODE_ENTRY(mutex->parent.suspend_thread.next);
1496 /* update the priority of mutex */
1497 mutex->priority = rt_sched_thread_get_curr_prio(th);
1498 }
1499 else
1500 {
1501 /* set mutex priority to maximal priority */
1502 mutex->priority = 0xff;
1503 }
1504
1505 /* try to change the priority of mutex owner thread */
1506 if (need_update)
1507 {
1508 /* get the maximal priority of mutex in thread */
1509 priority = _thread_get_mutex_priority(mutex->owner);
1510 if (priority != rt_sched_thread_get_curr_prio(mutex->owner))
1511 {
1512 _thread_update_priority(mutex->owner, priority, suspend_flag);
1513 }
1514 }
1515
1516 rt_sched_unlock(slvl);
1517
1518 rt_spin_unlock(&(mutex->spinlock));
1519
1520 /* clear pending object before exit */
1521 thread->pending_object = RT_NULL;
1522
1523 /* fix thread error number to negative value and return */
1524 return ret > 0 ? -ret : ret;
1525 }
1526 }
1527 }
1528 }
1529
1530 rt_spin_unlock(&(mutex->spinlock));
1531
1532 RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(mutex->parent.parent)));
1533
1534 return RT_EOK;
1535}
1536
1537rt_err_t rt_mutex_take(rt_mutex_t mutex, rt_int32_t time)
1538{
1539 return _rt_mutex_take(mutex, time, RT_UNINTERRUPTIBLE);
1540}
1542
1543rt_err_t rt_mutex_take_interruptible(rt_mutex_t mutex, rt_int32_t time)
1544{
1545 return _rt_mutex_take(mutex, time, RT_INTERRUPTIBLE);
1546}
1548
1549rt_err_t rt_mutex_take_killable(rt_mutex_t mutex, rt_int32_t time)
1550{
1551 return _rt_mutex_take(mutex, time, RT_KILLABLE);
1552}
1554
1570rt_err_t rt_mutex_trytake(rt_mutex_t mutex)
1571{
1572 return rt_mutex_take(mutex, RT_WAITING_NO);
1573}
1575
1576
1589rt_err_t rt_mutex_release(rt_mutex_t mutex)
1590{
1591 rt_sched_lock_level_t slvl;
1592 struct rt_thread *thread;
1593 rt_bool_t need_schedule;
1594
1595 /* parameter check */
1596 RT_ASSERT(mutex != RT_NULL);
1597 RT_ASSERT(rt_object_get_type(&mutex->parent.parent) == RT_Object_Class_Mutex);
1598
1599 need_schedule = RT_FALSE;
1600
1601 /* only a thread can release a mutex, because ownership must be tested */
1602 RT_DEBUG_IN_THREAD_CONTEXT;
1603
1604 /* get current thread */
1605 thread = rt_thread_self();
1606
1607 rt_spin_lock(&(mutex->spinlock));
1608
1609 LOG_D("mutex_release:current thread %s, hold: %d",
1610 thread->parent.name, mutex->hold);
1611
1612 RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(mutex->parent.parent)));
1613
1614 /* the mutex can only be released by its owner */
1615 if (thread != mutex->owner)
1616 {
1617 thread->error = -RT_ERROR;
1618 rt_spin_unlock(&(mutex->spinlock));
1619
1620 return -RT_ERROR;
1621 }
1622
1623 /* decrease hold */
1624 mutex->hold --;
1625 /* if no hold */
1626 if (mutex->hold == 0)
1627 {
1628 rt_sched_lock(&slvl);
1629
1630 /* remove mutex from thread's taken list */
1631 rt_list_remove(&mutex->taken_list);
1632
1633 /* whether change the thread priority */
1634 need_schedule = _check_and_update_prio(thread, mutex);
1635
1636 /* wakeup suspended thread */
1637 if (!rt_list_isempty(&mutex->parent.suspend_thread))
1638 {
1639 struct rt_thread *next_thread;
1640 do
1641 {
1642 /* get the first suspended thread */
1643 next_thread = RT_THREAD_LIST_NODE_ENTRY(mutex->parent.suspend_thread.next);
1644
1646
1647 /* remove the thread from the suspended list of mutex */
1648 rt_list_remove(&RT_THREAD_LIST_NODE(next_thread));
1649
1650 /* resume thread to ready queue */
1651 if (rt_sched_thread_ready(next_thread) != RT_EOK)
1652 {
1657 next_thread = RT_NULL;
1658 }
1659 } while (!next_thread && !rt_list_isempty(&mutex->parent.suspend_thread));
1660
1661 if (next_thread)
1662 {
1663 LOG_D("mutex_release: resume thread: %s",
1664 next_thread->parent.name);
1665
1666 /* set new owner and put mutex into taken list of thread */
1667 mutex->owner = next_thread;
1668 mutex->hold = 1;
1669 rt_list_insert_after(&next_thread->taken_object_list, &mutex->taken_list);
1670
1671 /* cleanup pending object */
1672 next_thread->pending_object = RT_NULL;
1673
1674 /* update mutex priority */
1675 if (!rt_list_isempty(&(mutex->parent.suspend_thread)))
1676 {
1677 struct rt_thread *th;
1678
1679 th = RT_THREAD_LIST_NODE_ENTRY(mutex->parent.suspend_thread.next);
1680 mutex->priority = rt_sched_thread_get_curr_prio(th);
1681 }
1682 else
1683 {
1684 mutex->priority = 0xff;
1685 }
1686
1687 need_schedule = RT_TRUE;
1688 }
1689 else
1690 {
1691 /* no waiting thread was woken up; clear the owner */
1692 mutex->owner = RT_NULL;
1693 mutex->priority = 0xff;
1694 }
1695
1696 rt_sched_unlock(slvl);
1697 }
1698 else
1699 {
1700 rt_sched_unlock(slvl);
1701
1702 /* clear owner */
1703 mutex->owner = RT_NULL;
1704 mutex->priority = 0xff;
1705 }
1706 }
1707
1708 rt_spin_unlock(&(mutex->spinlock));
1709
1710 /* perform a schedule */
1711 if (need_schedule == RT_TRUE)
1712 rt_schedule();
1713
1714 return RT_EOK;
1715}
1717
1718
1733rt_err_t rt_mutex_control(rt_mutex_t mutex, int cmd, void *arg)
1734{
1735 RT_UNUSED(mutex);
1736 RT_UNUSED(cmd);
1737 RT_UNUSED(arg);
1738
1739 return -RT_EINVAL;
1740}
1742
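
/*
 * Illustrative usage sketch (editorial addition, not part of ipc.c):
 * protecting a shared counter with a mutex. Assumes RT_USING_HEAP for
 * rt_mutex_create(); names and the counter are hypothetical.
 */
static rt_mutex_t demo_lock;
static rt_uint32_t demo_counter;

static void demo_mutex_usage(void)
{
    demo_lock = rt_mutex_create("dlock", RT_IPC_FLAG_PRIO);
    if (demo_lock == RT_NULL)
        return;

    if (rt_mutex_take(demo_lock, RT_WAITING_FOREVER) == RT_EOK)
    {
        demo_counter++;              /* critical section */
        rt_mutex_release(demo_lock); /* only the owner may release */
    }
}
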
1744#endif /* RT_USING_MUTEX */
1745
1746#ifdef RT_USING_EVENT
1751
1786rt_err_t rt_event_init(rt_event_t event, const char *name, rt_uint8_t flag)
1787{
1788 /* parameter check */
1789 RT_ASSERT(event != RT_NULL);
1790 RT_ASSERT((flag == RT_IPC_FLAG_FIFO) || (flag == RT_IPC_FLAG_PRIO));
1791
1792 /* initialize object */
1793 rt_object_init(&(event->parent.parent), RT_Object_Class_Event, name);
1794
1795 /* set parent flag */
1796 event->parent.parent.flag = flag;
1797
1798 /* initialize ipc object */
1799 _ipc_object_init(&(event->parent));
1800
1801 /* initialize event */
1802 event->set = 0;
1803 rt_spin_lock_init(&(event->spinlock));
1804
1805 return RT_EOK;
1806}
1808
1809
1828rt_err_t rt_event_detach(rt_event_t event)
1829{
1830 rt_base_t level;
1831
1832 /* parameter check */
1833 RT_ASSERT(event != RT_NULL);
1834 RT_ASSERT(rt_object_get_type(&event->parent.parent) == RT_Object_Class_Event);
1835 RT_ASSERT(rt_object_is_systemobject(&event->parent.parent));
1836
1837 level = rt_spin_lock_irqsave(&(event->spinlock));
1838 /* resume all suspended thread */
1839 rt_susp_list_resume_all(&(event->parent.suspend_thread), RT_ERROR);
1840 rt_spin_unlock_irqrestore(&(event->spinlock), level);
1841
1842 /* detach event object */
1843 rt_object_detach(&(event->parent.parent));
1844
1845 return RT_EOK;
1846}
1848
1849#ifdef RT_USING_HEAP
1878rt_event_t rt_event_create(const char *name, rt_uint8_t flag)
1879{
1880 rt_event_t event;
1881
1882 RT_ASSERT((flag == RT_IPC_FLAG_FIFO) || (flag == RT_IPC_FLAG_PRIO));
1883
1884 RT_DEBUG_NOT_IN_INTERRUPT;
1885
1886 /* allocate object */
1887 event = (rt_event_t)rt_object_allocate(RT_Object_Class_Event, name);
1888 if (event == RT_NULL)
1889 return event;
1890
1891 /* set parent */
1892 event->parent.parent.flag = flag;
1893
1894 /* initialize ipc object */
1895 _ipc_object_init(&(event->parent));
1896
1897 /* initialize event */
1898 event->set = 0;
1899 rt_spin_lock_init(&(event->spinlock));
1900
1901 return event;
1902}
1904
1905
1924rt_err_t rt_event_delete(rt_event_t event)
1925{
1926 /* parameter check */
1927 RT_ASSERT(event != RT_NULL);
1928 RT_ASSERT(rt_object_get_type(&event->parent.parent) == RT_Object_Class_Event);
1929 RT_ASSERT(rt_object_is_systemobject(&event->parent.parent) == RT_FALSE);
1930
1931 RT_DEBUG_NOT_IN_INTERRUPT;
1932
1933 rt_spin_lock(&(event->spinlock));
1934 /* resume all suspended thread */
1935 rt_susp_list_resume_all(&(event->parent.suspend_thread), RT_ERROR);
1936 rt_spin_unlock(&(event->spinlock));
1937
1938 /* delete event object */
1939 rt_object_delete(&(event->parent.parent));
1940
1941 return RT_EOK;
1942}
1944#endif /* RT_USING_HEAP */
1945
1946
1964rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set)
1965{
1966 struct rt_list_node *n;
1967 struct rt_thread *thread;
1968 rt_sched_lock_level_t slvl;
1969 rt_base_t level;
1970 rt_base_t status;
1971 rt_bool_t need_schedule;
1972 rt_uint32_t need_clear_set = 0;
1973
1974 /* parameter check */
1975 RT_ASSERT(event != RT_NULL);
1976 RT_ASSERT(rt_object_get_type(&event->parent.parent) == RT_Object_Class_Event);
1977
1978 if (set == 0)
1979 return -RT_ERROR;
1980
1981 need_schedule = RT_FALSE;
1982
1983 level = rt_spin_lock_irqsave(&(event->spinlock));
1984
1985 /* set event */
1986 event->set |= set;
1987
1988 RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(event->parent.parent)));
1989
1990 rt_sched_lock(&slvl);
1991 if (!rt_list_isempty(&event->parent.suspend_thread))
1992 {
1993 /* search thread list to resume thread */
1994 n = event->parent.suspend_thread.next;
1995 while (n != &(event->parent.suspend_thread))
1996 {
1997 /* get thread */
1998 thread = RT_THREAD_LIST_NODE_ENTRY(n);
1999
2000 status = -RT_ERROR;
2001 if (thread->event_info & RT_EVENT_FLAG_AND)
2002 {
2003 if ((thread->event_set & event->set) == thread->event_set)
2004 {
2005 /* received an AND event */
2006 status = RT_EOK;
2007 }
2008 }
2009 else if (thread->event_info & RT_EVENT_FLAG_OR)
2010 {
2011 if (thread->event_set & event->set)
2012 {
2013 /* save the received event set */
2014 thread->event_set = thread->event_set & event->set;
2015
2016 /* received an OR event */
2017 status = RT_EOK;
2018 }
2019 }
2020 else
2021 {
2022 rt_sched_unlock(slvl);
2023 rt_spin_unlock_irqrestore(&(event->spinlock), level);
2024
2025 return -RT_EINVAL;
2026 }
2027
2028 /* move node to the next */
2029 n = n->next;
2030
2031 /* condition is satisfied, resume thread */
2032 if (status == RT_EOK)
2033 {
2034 /* clear event */
2035 if (thread->event_info & RT_EVENT_FLAG_CLEAR)
2036 need_clear_set |= thread->event_set;
2037
2038 /* resume the thread and continue with the next node */
2039 rt_sched_thread_ready(thread);
2040 thread->error = RT_EOK;
2041
2042 /* need do a scheduling */
2043 need_schedule = RT_TRUE;
2044 }
2045 }
2046 if (need_clear_set)
2047 {
2048 event->set &= ~need_clear_set;
2049 }
2050 }
2051
2052 rt_sched_unlock(slvl);
2053 rt_spin_unlock_irqrestore(&(event->spinlock), level);
2054
2055 /* do a schedule */
2056 if (need_schedule == RT_TRUE)
2057 rt_schedule();
2058
2059 return RT_EOK;
2060}
2062
2063
2096static rt_err_t _rt_event_recv(rt_event_t event,
2097 rt_uint32_t set,
2098 rt_uint8_t option,
2099 rt_int32_t timeout,
2100 rt_uint32_t *recved,
2101 int suspend_flag)
2102{
2103 struct rt_thread *thread;
2104 rt_base_t level;
2105 rt_base_t status;
2106 rt_err_t ret;
2107
2108 /* parameter check */
2109 RT_ASSERT(event != RT_NULL);
2110 RT_ASSERT(rt_object_get_type(&event->parent.parent) == RT_Object_Class_Event);
2111
2112 /* current context checking */
2113 RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);
2114
2115 if (set == 0)
2116 return -RT_ERROR;
2117
2118 /* initialize status */
2119 status = -RT_ERROR;
2120 /* get current thread */
2121 thread = rt_thread_self();
2122 /* reset thread error */
2123 thread->error = -RT_EINTR;
2124
2125 RT_OBJECT_HOOK_CALL(rt_object_trytake_hook, (&(event->parent.parent)));
2126
2127 level = rt_spin_lock_irqsave(&(event->spinlock));
2128
2129 /* check event set */
2130 if (option & RT_EVENT_FLAG_AND)
2131 {
2132 if ((event->set & set) == set)
2133 status = RT_EOK;
2134 }
2135 else if (option & RT_EVENT_FLAG_OR)
2136 {
2137 if (event->set & set)
2138 status = RT_EOK;
2139 }
2140 else
2141 {
2142 /* either RT_EVENT_FLAG_AND or RT_EVENT_FLAG_OR should be set */
2143 RT_ASSERT(0);
2144 }
2145
2146 if (status == RT_EOK)
2147 {
2148 thread->error = RT_EOK;
2149
2150 /* set received event */
2151 if (recved)
2152 *recved = (event->set & set);
2153
2154 /* fill thread event info */
2155 thread->event_set = (event->set & set);
2156 thread->event_info = option;
2157
2158 /* received event */
2159 if (option & RT_EVENT_FLAG_CLEAR)
2160 event->set &= ~set;
2161 }
2162 else if (timeout == 0)
2163 {
2164 /* no waiting */
2165 thread->error = -RT_ETIMEOUT;
2166
2167 rt_spin_unlock_irqrestore(&(event->spinlock), level);
2168
2169 return -RT_ETIMEOUT;
2170 }
2171 else
2172 {
2173 /* fill thread event info */
2174 thread->event_set = set;
2175 thread->event_info = option;
2176
2177 /* put thread to suspended thread list */
2178 ret = rt_thread_suspend_to_list(thread, &(event->parent.suspend_thread),
2179 event->parent.parent.flag, suspend_flag);
2180 if (ret != RT_EOK)
2181 {
2182 rt_spin_unlock_irqrestore(&(event->spinlock), level);
2183 return ret;
2184 }
2185
2186 /* if there is a waiting timeout, active thread timer */
2187 if (timeout > 0)
2188 {
2189 /* reset the timeout of thread timer and start it */
2190 rt_timer_control(&(thread->thread_timer),
2191 RT_TIMER_CTRL_SET_TIME,
2192 &timeout);
2193 rt_timer_start(&(thread->thread_timer));
2194 }
2195
2196 rt_spin_unlock_irqrestore(&(event->spinlock), level);
2197
2198 /* do a schedule */
2199 rt_schedule();
2200
2201 if (thread->error != RT_EOK)
2202 {
2203 /* return error */
2204 return thread->error;
2205 }
2206
2207 /* received an event, disable interrupt to protect */
2208 level = rt_spin_lock_irqsave(&(event->spinlock));
2209
2210 /* set received event */
2211 if (recved)
2212 *recved = thread->event_set;
2213 }
2214
2215 rt_spin_unlock_irqrestore(&(event->spinlock), level);
2216
2217 RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(event->parent.parent)));
2218
2219 return thread->error;
2220}
2221
2222rt_err_t rt_event_recv(rt_event_t event,
2223 rt_uint32_t set,
2224 rt_uint8_t option,
2225 rt_int32_t timeout,
2226 rt_uint32_t *recved)
2227{
2228 return _rt_event_recv(event, set, option, timeout, recved, RT_UNINTERRUPTIBLE);
2229}
2231
2232rt_err_t rt_event_recv_interruptible(rt_event_t event,
2233 rt_uint32_t set,
2234 rt_uint8_t option,
2235 rt_int32_t timeout,
2236 rt_uint32_t *recved)
2237{
2238 return _rt_event_recv(event, set, option, timeout, recved, RT_INTERRUPTIBLE);
2239}
2241
2242rt_err_t rt_event_recv_killable(rt_event_t event,
2243 rt_uint32_t set,
2244 rt_uint8_t option,
2245 rt_int32_t timeout,
2246 rt_uint32_t *recved)
2247{
2248 return _rt_event_recv(event, set, option, timeout, recved, RT_KILLABLE);
2249}
2265rt_err_t rt_event_control(rt_event_t event, int cmd, void *arg)
2266{
2267 rt_base_t level;
2268
2269 RT_UNUSED(arg);
2270
2271 /* parameter check */
2272 RT_ASSERT(event != RT_NULL);
2273 RT_ASSERT(rt_object_get_type(&event->parent.parent) == RT_Object_Class_Event);
2274
2275 if (cmd == RT_IPC_CMD_RESET)
2276 {
2277 level = rt_spin_lock_irqsave(&(event->spinlock));
2278
2279 /* resume all waiting thread */
2280 rt_susp_list_resume_all(&event->parent.suspend_thread, RT_ERROR);
2281
2282 /* initialize event set */
2283 event->set = 0;
2284
2285 rt_spin_unlock_irqrestore(&(event->spinlock), level);
2286
2287 rt_schedule();
2288
2289 return RT_EOK;
2290 }
2291
2292 return -RT_ERROR;
2293}
2295
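
/*
 * Illustrative usage sketch (editorial addition, not part of ipc.c): waiting
 * for any of two hypothetical event bits with auto-clear semantics.
 */
#define DEMO_EVENT_RX (1U << 0)
#define DEMO_EVENT_TX (1U << 1)

static void demo_event_usage(rt_event_t ev)
{
    rt_uint32_t recved = 0;

    /* wake when RX or TX is set; the satisfied bits are cleared on return */
    if (rt_event_recv(ev, DEMO_EVENT_RX | DEMO_EVENT_TX,
                      RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR,
                      RT_WAITING_FOREVER, &recved) == RT_EOK)
    {
        if (recved & DEMO_EVENT_RX) { /* ... handle RX ... */ }
        if (recved & DEMO_EVENT_TX) { /* ... handle TX ... */ }
    }
}
/* a producer (thread or ISR) would signal with: rt_event_send(ev, DEMO_EVENT_RX); */
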
2297#endif /* RT_USING_EVENT */
2298
2299#ifdef RT_USING_MAILBOX
2304
2343rt_err_t rt_mb_init(rt_mailbox_t mb,
2344 const char *name,
2345 void *msgpool,
2346 rt_size_t size,
2347 rt_uint8_t flag)
2348{
2349 RT_ASSERT(mb != RT_NULL);
2350 RT_ASSERT((flag == RT_IPC_FLAG_FIFO) || (flag == RT_IPC_FLAG_PRIO));
2351
2352 /* initialize object */
2353 rt_object_init(&(mb->parent.parent), RT_Object_Class_MailBox, name);
2354
2355 /* set parent flag */
2356 mb->parent.parent.flag = flag;
2357
2358 /* initialize ipc object */
2359 _ipc_object_init(&(mb->parent));
2360
2361 /* initialize mailbox */
2362 mb->msg_pool = (rt_ubase_t *)msgpool;
2363 mb->size = (rt_uint16_t)size;
2364 mb->entry = 0;
2365 mb->in_offset = 0;
2366 mb->out_offset = 0;
2367
2368 /* initialize an additional list of sender suspend thread */
2369 rt_list_init(&(mb->suspend_sender_thread));
2370 rt_spin_lock_init(&(mb->spinlock));
2371
2372 return RT_EOK;
2373}
2375
2376
2395rt_err_t rt_mb_detach(rt_mailbox_t mb)
2396{
2397 rt_base_t level;
2398
2399 /* parameter check */
2400 RT_ASSERT(mb != RT_NULL);
2401 RT_ASSERT(rt_object_get_type(&mb->parent.parent) == RT_Object_Class_MailBox);
2402 RT_ASSERT(rt_object_is_systemobject(&mb->parent.parent));
2403
2404 level = rt_spin_lock_irqsave(&(mb->spinlock));
2405 /* resume all suspended thread */
2406 rt_susp_list_resume_all(&(mb->parent.suspend_thread), RT_ERROR);
2407 /* also resume all mailbox private suspended thread */
2408 rt_susp_list_resume_all(&(mb->suspend_sender_thread), RT_ERROR);
2409 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2410
2411 /* detach mailbox object */
2412 rt_object_detach(&(mb->parent.parent));
2413
2414 return RT_EOK;
2415}
2417
2418#ifdef RT_USING_HEAP
2450rt_mailbox_t rt_mb_create(const char *name, rt_size_t size, rt_uint8_t flag)
2451{
2452 rt_mailbox_t mb;
2453
2454 RT_ASSERT((flag == RT_IPC_FLAG_FIFO) || (flag == RT_IPC_FLAG_PRIO));
2455
2456 RT_DEBUG_NOT_IN_INTERRUPT;
2457
2458 /* allocate object */
2459 mb = (rt_mailbox_t)rt_object_allocate(RT_Object_Class_MailBox, name);
2460 if (mb == RT_NULL)
2461 return mb;
2462
2463 /* set parent */
2464 mb->parent.parent.flag = flag;
2465
2466 /* initialize ipc object */
2467 _ipc_object_init(&(mb->parent));
2468
2469 /* initialize mailbox */
2470 mb->size = (rt_uint16_t)size;
2471 mb->msg_pool = (rt_ubase_t *)RT_KERNEL_MALLOC(mb->size * sizeof(rt_ubase_t));
2472 if (mb->msg_pool == RT_NULL)
2473 {
2474 /* delete mailbox object */
2475 rt_object_delete(&(mb->parent.parent));
2476
2477 return RT_NULL;
2478 }
2479 mb->entry = 0;
2480 mb->in_offset = 0;
2481 mb->out_offset = 0;
2482
2483 /* initialize an additional list of sender suspend thread */
2484 rt_list_init(&(mb->suspend_sender_thread));
2485 rt_spin_lock_init(&(mb->spinlock));
2486
2487 return mb;
2488}
2490
2491
2510rt_err_t rt_mb_delete(rt_mailbox_t mb)
2511{
2512 /* parameter check */
2513 RT_ASSERT(mb != RT_NULL);
2514 RT_ASSERT(rt_object_get_type(&mb->parent.parent) == RT_Object_Class_MailBox);
2515 RT_ASSERT(rt_object_is_systemobject(&mb->parent.parent) == RT_FALSE);
2516
2517 RT_DEBUG_NOT_IN_INTERRUPT;
2518 rt_spin_lock(&(mb->spinlock));
2519
2520 /* resume all suspended thread */
2521 rt_susp_list_resume_all(&(mb->parent.suspend_thread), RT_ERROR);
2522
2523 /* also resume all mailbox private suspended thread */
2524 rt_susp_list_resume_all(&(mb->suspend_sender_thread), RT_ERROR);
2525
2526 rt_spin_unlock(&(mb->spinlock));
2527
2528 /* free mailbox pool */
2529 RT_KERNEL_FREE(mb->msg_pool);
2530
2531 /* delete mailbox object */
2532 rt_object_delete(&(mb->parent.parent));
2533
2534 return RT_EOK;
2535}
2537#endif /* RT_USING_HEAP */
2538
2539
2563static rt_err_t _rt_mb_send_wait(rt_mailbox_t mb,
2564 rt_ubase_t value,
2565 rt_int32_t timeout,
2566 int suspend_flag)
2567{
2568 struct rt_thread *thread;
2569 rt_base_t level;
2570 rt_uint32_t tick_delta;
2571 rt_err_t ret;
2572
2573 /* parameter check */
2574 RT_ASSERT(mb != RT_NULL);
2575 RT_ASSERT(rt_object_get_type(&mb->parent.parent) == RT_Object_Class_MailBox);
2576
2577 /* current context checking */
2578 RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);
2579
2580 /* initialize delta tick */
2581 tick_delta = 0;
2582 /* get current thread */
2583 thread = rt_thread_self();
2584
2585 RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(mb->parent.parent)));
2586
2587 /* disable interrupt */
2588 level = rt_spin_lock_irqsave(&(mb->spinlock));
2589
2590 /* for non-blocking call */
2591 if (mb->entry == mb->size && timeout == 0)
2592 {
2593 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2594 return -RT_EFULL;
2595 }
2596
2597 /* mailbox is full */
2598 while (mb->entry == mb->size)
2599 {
2600 /* reset error number in thread */
2601 thread->error = -RT_EINTR;
2602
2603 /* no waiting, return timeout */
2604 if (timeout == 0)
2605 {
2606 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2607
2608 return -RT_EFULL;
2609 }
2610
2611 /* suspend current thread */
2612 ret = rt_thread_suspend_to_list(thread, &(mb->suspend_sender_thread),
2613 mb->parent.parent.flag, suspend_flag);
2614
2615 if (ret != RT_EOK)
2616 {
2617 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2618 return ret;
2619 }
2620
2621 /* has waiting time, start thread timer */
2622 if (timeout > 0)
2623 {
2624 /* get the start tick of timer */
2625 tick_delta = rt_tick_get();
2626
2627 LOG_D("mb_send_wait: start timer of thread:%s",
2628 thread->parent.name);
2629
2630 /* reset the timeout of thread timer and start it */
2631 rt_timer_control(&(thread->thread_timer),
2632 RT_TIMER_CTRL_SET_TIME,
2633 &timeout);
2634 rt_timer_start(&(thread->thread_timer));
2635 }
2636 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2637
2638 /* re-schedule */
2639 rt_schedule();
2640
2641 /* resume from suspend state */
2642 if (thread->error != RT_EOK)
2643 {
2644 /* return error */
2645 return thread->error;
2646 }
2647
2648 level = rt_spin_lock_irqsave(&(mb->spinlock));
2649
2650 /* if it's not waiting forever, re-calculate the timeout tick */
2651 if (timeout > 0)
2652 {
2653 tick_delta = rt_tick_get() - tick_delta;
2654 timeout -= tick_delta;
2655 if (timeout < 0)
2656 timeout = 0;
2657 }
2658 }
2659
2660 /* set ptr */
2661 mb->msg_pool[mb->in_offset] = value;
2662 /* increase input offset */
2663 ++ mb->in_offset;
2664 if (mb->in_offset >= mb->size)
2665 mb->in_offset = 0;
2666
2667 if(mb->entry < RT_MB_ENTRY_MAX)
2668 {
2669 /* increase message entry */
2670 mb->entry ++;
2671 }
2672 else
2673 {
2674 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2675 return -RT_EFULL; /* value overflowed */
2676 }
2677
2678 /* resume suspended thread */
2679 if (!rt_list_isempty(&mb->parent.suspend_thread))
2680 {
2681 rt_susp_list_dequeue(&(mb->parent.suspend_thread), RT_EOK);
2682
2683 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2684
2685 rt_schedule();
2686
2687 return RT_EOK;
2688 }
2689 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2690
2691 return RT_EOK;
2692}
2693
2694rt_err_t rt_mb_send_wait(rt_mailbox_t mb,
2695 rt_ubase_t value,
2696 rt_int32_t timeout)
2697{
2698 return _rt_mb_send_wait(mb, value, timeout, RT_UNINTERRUPTIBLE);
2699}
2701
2702rt_err_t rt_mb_send_wait_interruptible(rt_mailbox_t mb,
2703 rt_ubase_t value,
2704 rt_int32_t timeout)
2705{
2706 return _rt_mb_send_wait(mb, value, timeout, RT_INTERRUPTIBLE);
2707}
2709
2710rt_err_t rt_mb_send_wait_killable(rt_mailbox_t mb,
2711 rt_ubase_t value,
2712 rt_int32_t timeout)
2713{
2714 return _rt_mb_send_wait(mb, value, timeout, RT_KILLABLE);
2715}
2734rt_err_t rt_mb_send(rt_mailbox_t mb, rt_ubase_t value)
2735{
2736 return rt_mb_send_wait(mb, value, 0);
2737}
2739
2741rt_err_t rt_mb_send_interruptible(rt_mailbox_t mb, rt_ubase_t value)
2742{
2743 return rt_mb_send_wait_interruptible(mb, value, 0);
2744}
2745
2746rt_err_t rt_mb_send_killable(rt_mailbox_t mb, rt_ubase_t value)
2747{
2748 return rt_mb_send_wait_killable(mb, value, 0);
2749}
2751
2768rt_err_t rt_mb_urgent(rt_mailbox_t mb, rt_ubase_t value)
2769{
2770 rt_base_t level;
2771
2772 /* parameter check */
2773 RT_ASSERT(mb != RT_NULL);
2774 RT_ASSERT(rt_object_get_type(&mb->parent.parent) == RT_Object_Class_MailBox);
2775
2776 RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(mb->parent.parent)));
2777
2778 level = rt_spin_lock_irqsave(&(mb->spinlock));
2779
2780 if (mb->entry == mb->size)
2781 {
2782 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2783 return -RT_EFULL;
2784 }
2785
2786 /* rewind to the previous position */
2787 if (mb->out_offset > 0)
2788 {
2789 mb->out_offset --;
2790 }
2791 else
2792 {
2793 mb->out_offset = mb->size - 1;
2794 }
2795
2796 /* set ptr */
2797 mb->msg_pool[mb->out_offset] = value;
2798
2799 /* increase message entry */
2800 mb->entry ++;
2801
2802 /* resume suspended thread */
2803 if (!rt_list_isempty(&mb->parent.suspend_thread))
2804 {
2805 rt_susp_list_dequeue(&(mb->parent.suspend_thread), RT_EOK);
2806
2807 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2808
2809 rt_schedule();
2810
2811 return RT_EOK;
2812 }
2813 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2814
2815 return RT_EOK;
2816}
2818
2819
2845static rt_err_t _rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout, int suspend_flag)
2846{
2847 struct rt_thread *thread;
2848 rt_base_t level;
2849 rt_uint32_t tick_delta;
2850 rt_err_t ret;
2851
2852 /* parameter check */
2853 RT_ASSERT(mb != RT_NULL);
2854 RT_ASSERT(rt_object_get_type(&mb->parent.parent) == RT_Object_Class_MailBox);
2855
2856 /* current context checking */
2857 RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);
2858
2859 /* initialize delta tick */
2860 tick_delta = 0;
2861 /* get current thread */
2862 thread = rt_thread_self();
2863
2864 RT_OBJECT_HOOK_CALL(rt_object_trytake_hook, (&(mb->parent.parent)));
2865
2866 level = rt_spin_lock_irqsave(&(mb->spinlock));
2867
2868 /* for non-blocking call */
2869 if (mb->entry == 0 && timeout == 0)
2870 {
2871 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2872
2873 return -RT_ETIMEOUT;
2874 }
2875
2876 /* mailbox is empty */
2877 while (mb->entry == 0)
2878 {
2879 /* reset error number in thread */
2880 thread->error = -RT_EINTR;
2881
2882 /* no waiting, return timeout */
2883 if (timeout == 0)
2884 {
2885 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2886
2887 thread->error = -RT_ETIMEOUT;
2888
2889 return -RT_ETIMEOUT;
2890 }
2891
2892 /* suspend current thread */
2893 ret = rt_thread_suspend_to_list(thread, &(mb->parent.suspend_thread),
2894 mb->parent.parent.flag, suspend_flag);
2895 if (ret != RT_EOK)
2896 {
2897 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2898 return ret;
2899 }
2900
2901 /* has waiting time, start thread timer */
2902 if (timeout > 0)
2903 {
2904 /* get the start tick of timer */
2905 tick_delta = rt_tick_get();
2906
2907 LOG_D("mb_recv: start timer of thread:%s",
2908 thread->parent.name);
2909
2910 /* reset the timeout of thread timer and start it */
2911 rt_timer_control(&(thread->thread_timer),
2912 RT_TIMER_CTRL_SET_TIME,
2913 &timeout);
2914 rt_timer_start(&(thread->thread_timer));
2915 }
2916
2917 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2918
2919 /* re-schedule */
2920 rt_schedule();
2921
2922 /* resume from suspend state */
2923 if (thread->error != RT_EOK)
2924 {
2925 /* return error */
2926 return thread->error;
2927 }
2928 level = rt_spin_lock_irqsave(&(mb->spinlock));
2929
2930 /* if it's not waiting forever, re-calculate the timeout tick */
2931 if (timeout > 0)
2932 {
2933 tick_delta = rt_tick_get() - tick_delta;
2934 timeout -= tick_delta;
2935 if (timeout < 0)
2936 timeout = 0;
2937 }
2938 }
2939
2940 /* fill ptr */
2941 *value = mb->msg_pool[mb->out_offset];
2942
2943 /* increase output offset */
2944 ++ mb->out_offset;
2945 if (mb->out_offset >= mb->size)
2946 mb->out_offset = 0;
2947
2948 /* decrease message entry */
2949 if(mb->entry > 0)
2950 {
2951 mb->entry --;
2952 }
2953
2954 /* resume suspended thread */
2955 if (!rt_list_isempty(&mb->suspend_sender_thread))
2956 {
2957 rt_susp_list_dequeue(&(mb->suspend_sender_thread), RT_EOK);
2958
2959 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2960
2961 RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(mb->parent.parent)));
2962
2963 rt_schedule();
2964
2965 return RT_EOK;
2966 }
2967 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
2968
2969 RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(mb->parent.parent)));
2970
2971 return RT_EOK;
2972}
2973
2974rt_err_t rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout)
2975{
2976 return _rt_mb_recv(mb, value, timeout, RT_UNINTERRUPTIBLE);
2977}
2979
2980rt_err_t rt_mb_recv_interruptible(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout)
2981{
2982 return _rt_mb_recv(mb, value, timeout, RT_INTERRUPTIBLE);
2983}
2985
2986rt_err_t rt_mb_recv_killable(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout)
2987{
2988 return _rt_mb_recv(mb, value, timeout, RT_KILLABLE);
2989}
2991
3006rt_err_t rt_mb_control(rt_mailbox_t mb, int cmd, void *arg)
3007{
3008 rt_base_t level;
3009
3010 RT_UNUSED(arg);
3011
3012 /* parameter check */
3013 RT_ASSERT(mb != RT_NULL);
3014 RT_ASSERT(rt_object_get_type(&mb->parent.parent) == RT_Object_Class_MailBox);
3015
3016 if (cmd == RT_IPC_CMD_RESET)
3017 {
3018 level = rt_spin_lock_irqsave(&(mb->spinlock));
3019
3020 /* resume all waiting thread */
3021 rt_susp_list_resume_all(&(mb->parent.suspend_thread), RT_ERROR);
3022 /* also resume all mailbox private suspended thread */
3023 rt_susp_list_resume_all(&(mb->suspend_sender_thread), RT_ERROR);
3024
3025 /* re-init mailbox */
3026 mb->entry = 0;
3027 mb->in_offset = 0;
3028 mb->out_offset = 0;
3029
3030 rt_spin_unlock_irqrestore(&(mb->spinlock), level);
3031
3032 rt_schedule();
3033
3034 return RT_EOK;
3035 }
3036
3037 return -RT_ERROR;
3038}
3040
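
/*
 * Illustrative usage sketch (editorial addition, not part of ipc.c): a
 * mailbox carries one rt_ubase_t per message, typically a pointer cast to
 * rt_ubase_t. Assumes RT_USING_HEAP; names are hypothetical.
 */
static void demo_mb_usage(void)
{
    rt_ubase_t value = 0;
    rt_mailbox_t mb = rt_mb_create("dmb", 8, RT_IPC_FLAG_FIFO);
    if (mb == RT_NULL)
        return;

    rt_mb_send(mb, (rt_ubase_t)0x1234);              /* non-blocking send */
    if (rt_mb_recv(mb, &value, RT_WAITING_FOREVER) == RT_EOK)
    {
        /* value == 0x1234 here */
    }
    rt_mb_delete(mb);
}
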
3042#endif /* RT_USING_MAILBOX */
3043
3044#ifdef RT_USING_MESSAGEQUEUE
3049
3092rt_err_t rt_mq_init(rt_mq_t mq,
3093 const char *name,
3094 void *msgpool,
3095 rt_size_t msg_size,
3096 rt_size_t pool_size,
3097 rt_uint8_t flag)
3098{
3099 struct rt_mq_message *head;
3100 rt_base_t temp;
3101 register rt_size_t msg_align_size;
3102
3103 /* parameter check */
3104 RT_ASSERT(mq != RT_NULL);
3105 RT_ASSERT((flag == RT_IPC_FLAG_FIFO) || (flag == RT_IPC_FLAG_PRIO));
3106
3107 /* initialize object */
3108 rt_object_init(&(mq->parent.parent), RT_Object_Class_MessageQueue, name);
3109
3110 /* set parent flag */
3111 mq->parent.parent.flag = flag;
3112
3113 /* initialize ipc object */
3114 _ipc_object_init(&(mq->parent));
3115
3116 /* set message pool */
3117 mq->msg_pool = msgpool;
3118
3119 /* get correct message size */
3120 msg_align_size = RT_ALIGN(msg_size, RT_ALIGN_SIZE);
3121 mq->msg_size = msg_size;
3122 mq->max_msgs = pool_size / (msg_align_size + sizeof(struct rt_mq_message));
3123
3124 if (0 == mq->max_msgs)
3125 {
3126 return -RT_EINVAL;
3127 }
3128
3129 /* initialize message list */
3130 mq->msg_queue_head = RT_NULL;
3131 mq->msg_queue_tail = RT_NULL;
3132
3133 /* initialize message empty list */
3134 mq->msg_queue_free = RT_NULL;
3135 for (temp = 0; temp < mq->max_msgs; temp ++)
3136 {
3137 head = (struct rt_mq_message *)((rt_uint8_t *)mq->msg_pool +
3138 temp * (msg_align_size + sizeof(struct rt_mq_message)));
3139 head->next = (struct rt_mq_message *)mq->msg_queue_free;
3140 mq->msg_queue_free = head;
3141 }
3142
3143 /* the initial entry is zero */
3144 mq->entry = 0;
3145
3146 /* initialize an additional list of sender suspend thread */
3147 rt_list_init(&(mq->suspend_sender_thread));
3148 rt_spin_lock_init(&(mq->spinlock));
3149
3150 return RT_EOK;
3151}
3153
3154
3173rt_err_t rt_mq_detach(rt_mq_t mq)
3174{
3175 rt_base_t level;
3176
3177 /* parameter check */
3178 RT_ASSERT(mq != RT_NULL);
3179 RT_ASSERT(rt_object_get_type(&mq->parent.parent) == RT_Object_Class_MessageQueue);
3180 RT_ASSERT(rt_object_is_systemobject(&mq->parent.parent));
3181
3182 level = rt_spin_lock_irqsave(&(mq->spinlock));
3183 /* resume all suspended thread */
3184 rt_susp_list_resume_all(&mq->parent.suspend_thread, RT_ERROR);
3185 /* also resume all message queue private suspended thread */
3186 rt_susp_list_resume_all(&(mq->suspend_sender_thread), RT_ERROR);
3187 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3188
3189 /* detach message queue object */
3190 rt_object_detach(&(mq->parent.parent));
3191
3192 return RT_EOK;
3193}
3195
3196#ifdef RT_USING_HEAP
3229rt_mq_t rt_mq_create(const char *name,
3230 rt_size_t msg_size,
3231 rt_size_t max_msgs,
3232 rt_uint8_t flag)
3233{
3234 struct rt_messagequeue *mq;
3235 struct rt_mq_message *head;
3236 rt_base_t temp;
3237 register rt_size_t msg_align_size;
3238
3239 RT_ASSERT((flag == RT_IPC_FLAG_FIFO) || (flag == RT_IPC_FLAG_PRIO));
3240
3241 RT_DEBUG_NOT_IN_INTERRUPT;
3242
3243 /* allocate object */
3244 mq = (rt_mq_t)rt_object_allocate(RT_Object_Class_MessageQueue, name);
3245 if (mq == RT_NULL)
3246 return mq;
3247
3248 /* set parent */
3249 mq->parent.parent.flag = flag;
3250
3251 /* initialize ipc object */
3252 _ipc_object_init(&(mq->parent));
3253
3254 /* initialize message queue */
3255
3256 /* get correct message size */
3257 msg_align_size = RT_ALIGN(msg_size, RT_ALIGN_SIZE);
3258 mq->msg_size = msg_size;
3259 mq->max_msgs = max_msgs;
3260
3261 /* allocate message pool */
3262 mq->msg_pool = RT_KERNEL_MALLOC((msg_align_size + sizeof(struct rt_mq_message)) * mq->max_msgs);
3263 if (mq->msg_pool == RT_NULL)
3264 {
3265 rt_object_delete(&(mq->parent.parent));
3266
3267 return RT_NULL;
3268 }
3269
3270 /* initialize message list */
3271 mq->msg_queue_head = RT_NULL;
3272 mq->msg_queue_tail = RT_NULL;
3273
3274 /* initialize message empty list */
3275 mq->msg_queue_free = RT_NULL;
3276 for (temp = 0; temp < mq->max_msgs; temp ++)
3277 {
3278 head = (struct rt_mq_message *)((rt_uint8_t *)mq->msg_pool +
3279 temp * (msg_align_size + sizeof(struct rt_mq_message)));
3280 head->next = (struct rt_mq_message *)mq->msg_queue_free;
3281 mq->msg_queue_free = head;
3282 }
3283
3284 /* the initial entry is zero */
3285 mq->entry = 0;
3286
3287 /* initialize an additional list of sender suspend thread */
3288 rt_list_init(&(mq->suspend_sender_thread));
3289 rt_spin_lock_init(&(mq->spinlock));
3290
3291 return mq;
3292}
3294
3295
3315rt_err_t rt_mq_delete(rt_mq_t mq)
3316{
3317 /* parameter check */
3318 RT_ASSERT(mq != RT_NULL);
3319 RT_ASSERT(rt_object_get_type(&mq->parent.parent) == RT_Object_Class_MessageQueue);
3320 RT_ASSERT(rt_object_is_systemobject(&mq->parent.parent) == RT_FALSE);
3321
3322 RT_DEBUG_NOT_IN_INTERRUPT;
3323
3324 rt_spin_lock(&(mq->spinlock));
3325 /* resume all suspended thread */
3326 rt_susp_list_resume_all(&(mq->parent.suspend_thread), RT_ERROR);
3327 /* also resume all message queue private suspended thread */
3328 rt_susp_list_resume_all(&(mq->suspend_sender_thread), RT_ERROR);
3329
3330 rt_spin_unlock(&(mq->spinlock));
3331
3332 /* free message queue pool */
3333 RT_KERNEL_FREE(mq->msg_pool);
3334
3335 /* delete message queue object */
3336 rt_object_delete(&(mq->parent.parent));
3337
3338 return RT_EOK;
3339}
3341#endif /* RT_USING_HEAP */
3342
3376static rt_err_t _rt_mq_send_wait(rt_mq_t mq,
3377 const void *buffer,
3378 rt_size_t size,
3379 rt_int32_t prio,
3380 rt_int32_t timeout,
3381 int suspend_flag)
3382{
3383 rt_base_t level;
3384 struct rt_mq_message *msg;
3385 rt_uint32_t tick_delta;
3386 struct rt_thread *thread;
3387 rt_err_t ret;
3388
3389 RT_UNUSED(prio);
3390
3391 /* parameter check */
3392 RT_ASSERT(mq != RT_NULL);
3394 RT_ASSERT(buffer != RT_NULL);
3395 RT_ASSERT(size != 0);
3396
3397 /* current context checking */
3398 RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);
3399
3400 /* larger than the maximum message size */
3401 if (size > mq->msg_size)
3402 return -RT_ERROR;
3403
3404 /* initialize delta tick */
3405 tick_delta = 0;
3406 /* get current thread */
3407 thread = rt_thread_self();
3408
3409 RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(mq->parent.parent)));
3410
3411 level = rt_spin_lock_irqsave(&(mq->spinlock));
3412
3413 /* get a message block from the free list */
3414 msg = (struct rt_mq_message *)mq->msg_queue_free;
3415 /* for non-blocking call */
3416 if (msg == RT_NULL && timeout == 0)
3417 {
3418 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3419
3420 return -RT_EFULL;
3421 }
3422
3423 /* message queue is full */
3424 while ((msg = (struct rt_mq_message *)mq->msg_queue_free) == RT_NULL)
3425 {
3426 /* reset error number in thread */
3427 thread->error = -RT_EINTR;
3428
3429 /* no waiting, return timeout */
3430 if (timeout == 0)
3431 {
3432 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3433
3434 return -RT_EFULL;
3435 }
3436
3437 /* suspend current thread */
3438 ret = rt_thread_suspend_to_list(thread, &(mq->suspend_sender_thread),
3439 mq->parent.parent.flag, suspend_flag);
3440 if (ret != RT_EOK)
3441 {
3442 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3443 return ret;
3444 }
3445
3446 /* has waiting time, start thread timer */
3447 if (timeout > 0)
3448 {
3449 /* get the start tick of timer */
3450 tick_delta = rt_tick_get();
3451
3452 LOG_D("mq_send_wait: start timer of thread:%s",
3453 thread->parent.name);
3454
3455 /* reset the timeout of thread timer and start it */
3456 rt_timer_control(&(thread->thread_timer),
3457 RT_TIMER_CTRL_SET_TIME,
3458 &timeout);
3459 rt_timer_start(&(thread->thread_timer));
3460 }
3461
3462 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3463
3464 /* re-schedule */
3465 rt_schedule();
3466
3467 /* resume from suspend state */
3468 if (thread->error != RT_EOK)
3469 {
3470 /* return error */
3471 return thread->error;
3472 }
3473 level = rt_spin_lock_irqsave(&(mq->spinlock));
3474
3475 /* if it's not waiting forever, re-calculate the timeout tick */
3476 if (timeout > 0)
3477 {
3478 tick_delta = rt_tick_get() - tick_delta;
3479 timeout -= tick_delta;
3480 if (timeout < 0)
3481 timeout = 0;
3482 }
3483 }
3484
3485 /* move free list pointer */
3486 mq->msg_queue_free = msg->next;
3487
3488 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3489
3490 /* the msg is the new tail of the list; its next shall be NULL */
3491 msg->next = RT_NULL;
3492
3493 /* add the length */
3494 ((struct rt_mq_message *)msg)->length = size;
3495 /* copy buffer */
3496 rt_memcpy(GET_MESSAGEBYTE_ADDR(msg), buffer, size);
3497
3498 /* disable interrupt */
3499 level = rt_spin_lock_irqsave(&(mq->spinlock));
3500#ifdef RT_USING_MESSAGEQUEUE_PRIORITY
3501 msg->prio = prio;
3502 if (mq->msg_queue_head == RT_NULL)
3503 mq->msg_queue_head = msg;
3504
3505 struct rt_mq_message *node, *prev_node = RT_NULL;
3506 for (node = mq->msg_queue_head; node != RT_NULL; node = node->next)
3507 {
3508 if (node->prio < msg->prio)
3509 {
3510 if (prev_node == RT_NULL)
3511 mq->msg_queue_head = msg;
3512 else
3513 prev_node->next = msg;
3514 msg->next = node;
3515 break;
3516 }
3517 if (node->next == RT_NULL)
3518 {
3519 if (node != msg)
3520 node->next = msg;
3521 mq->msg_queue_tail = msg;
3522 break;
3523 }
3524 prev_node = node;
3525 }
3526#else
3527 /* link msg to message queue */
3528 if (mq->msg_queue_tail != RT_NULL)
3529 {
3530 /* if the tail exists, */
3531 ((struct rt_mq_message *)mq->msg_queue_tail)->next = msg;
3532 }
3533
3534 /* set new tail */
3535 mq->msg_queue_tail = msg;
3536 /* if the head is empty, set head */
3537 if (mq->msg_queue_head == RT_NULL)
3538 mq->msg_queue_head = msg;
3539#endif
3540
3541 if(mq->entry < RT_MQ_ENTRY_MAX)
3542 {
3543 /* increase message entry */
3544 mq->entry ++;
3545 }
3546 else
3547 {
3548 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3549 return -RT_EFULL; /* value overflowed */
3550 }
3551
3552 /* resume suspended thread */
3553 if (!rt_list_isempty(&mq->parent.suspend_thread))
3554 {
3555 rt_susp_list_dequeue(&(mq->parent.suspend_thread), RT_EOK);
3556
3557 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3558
3559 rt_schedule();
3560
3561 return RT_EOK;
3562 }
3563 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3564
3565 return RT_EOK;
3566}
3567
3568rt_err_t rt_mq_send_wait(rt_mq_t mq,
3569 const void *buffer,
3570 rt_size_t size,
3571 rt_int32_t timeout)
3572{
3573 return _rt_mq_send_wait(mq, buffer, size, 0, timeout, RT_UNINTERRUPTIBLE);
3574}
3576
3577rt_err_t rt_mq_send_wait_interruptible(rt_mq_t mq,
3578 const void *buffer,
3579 rt_size_t size,
3580 rt_int32_t timeout)
3581{
3582 return _rt_mq_send_wait(mq, buffer, size, 0, timeout, RT_INTERRUPTIBLE);
3583}
3585
3586rt_err_t rt_mq_send_wait_killable(rt_mq_t mq,
3587 const void *buffer,
3588 rt_size_t size,
3589 rt_int32_t timeout)
3590{
3591 return _rt_mq_send_wait(mq, buffer, size, 0, timeout, RT_KILLABLE);
3592}
3616rt_err_t rt_mq_send(rt_mq_t mq, const void *buffer, rt_size_t size)
3617{
3618 return rt_mq_send_wait(mq, buffer, size, 0);
3619}
3621
3622rt_err_t rt_mq_send_interruptible(rt_mq_t mq, const void *buffer, rt_size_t size)
3623{
3624 return rt_mq_send_wait_interruptible(mq, buffer, size, 0);
3625}
3627
3628rt_err_t rt_mq_send_killable(rt_mq_t mq, const void *buffer, rt_size_t size)
3629{
3630 return rt_mq_send_wait_killable(mq, buffer, size, 0);
3631}
3651rt_err_t rt_mq_urgent(rt_mq_t mq, const void *buffer, rt_size_t size)
3652{
3653 rt_base_t level;
3654 struct rt_mq_message *msg;
3655
3656 /* parameter check */
3657 RT_ASSERT(mq != RT_NULL);
3658 RT_ASSERT(rt_object_get_type(&mq->parent.parent) == RT_Object_Class_MessageQueue);
3659 RT_ASSERT(buffer != RT_NULL);
3660 RT_ASSERT(size != 0);
3661
3662 /* larger than the maximum message size */
3663 if (size > mq->msg_size)
3664 return -RT_ERROR;
3665
3666 RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(mq->parent.parent)));
3667
3668 level = rt_spin_lock_irqsave(&(mq->spinlock));
3669
3670 /* take a message block from the free list */
3671 msg = (struct rt_mq_message *)mq->msg_queue_free;
3672 /* message queue is full */
3673 if (msg == RT_NULL)
3674 {
3675 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3676
3677 return -RT_EFULL;
3678 }
3679 /* move free list pointer */
3680 mq->msg_queue_free = msg->next;
3681
3682 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3683
3684 /* record the message length */
3685 ((struct rt_mq_message *)msg)->length = size;
3686 /* copy buffer */
3687 rt_memcpy(GET_MESSAGEBYTE_ADDR(msg), buffer, size);
3688
3689 level = rt_spin_lock_irqsave(&(mq->spinlock));
3690
3691 /* link msg to the beginning of message queue */
3692 msg->next = (struct rt_mq_message *)mq->msg_queue_head;
3693 mq->msg_queue_head = msg;
3694
3695 /* if the queue was empty, the new msg is also the tail */
3696 if (mq->msg_queue_tail == RT_NULL)
3697 mq->msg_queue_tail = msg;
3698
3699 if (mq->entry < RT_MQ_ENTRY_MAX)
3700 {
3701 /* increase message entry */
3702 mq->entry++;
3703 }
3704 else
3705 {
3706 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3707 return -RT_EFULL; /* value overflowed */
3708 }
3709
3710 /* resume suspended thread */
3711 if (!rt_list_isempty(&(mq->parent.suspend_thread)))
3712 {
3713 rt_susp_list_dequeue(&(mq->parent.suspend_thread), RT_EOK);
3714
3715 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3716
3717 rt_schedule();
3718
3719 return RT_EOK;
3720 }
3721
3722 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3723
3724 return RT_EOK;
3725}
3726RTM_EXPORT(rt_mq_urgent);
3727
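Because rt_mq_urgent() links the new message at the head of the queue rather than the tail, it overtakes everything already queued. A short sketch (demo_mq as above):

rt_uint32_t normal = 1, urgent = 2, out;

rt_mq_send(demo_mq, &normal, sizeof(normal));   /* appended at the tail */
rt_mq_urgent(demo_mq, &urgent, sizeof(urgent)); /* linked at the head */

/* the next receive is expected to deliver 'urgent' (2) before 'normal' (1) */
rt_mq_recv(demo_mq, &out, sizeof(out), RT_WAITING_FOREVER);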
3758static rt_ssize_t _rt_mq_recv(rt_mq_t mq,
3759 void *buffer,
3760 rt_size_t size,
3761 rt_int32_t *prio,
3762 rt_int32_t timeout,
3763 int suspend_flag)
3764{
3765 struct rt_thread *thread;
3766 rt_base_t level;
3767 struct rt_mq_message *msg;
3768 rt_uint32_t tick_delta;
3769 rt_err_t ret;
3770 rt_size_t len;
3771
3772 RT_UNUSED(prio);
3773
3774 /* parameter check */
3775 RT_ASSERT(mq != RT_NULL);
3776 RT_ASSERT(rt_object_get_type(&mq->parent.parent) == RT_Object_Class_MessageQueue);
3777 RT_ASSERT(buffer != RT_NULL);
3778 RT_ASSERT(size != 0);
3779
3780 /* current context checking */
3781 RT_DEBUG_SCHEDULER_AVAILABLE(timeout != 0);
3782
3783 /* initialize delta tick */
3784 tick_delta = 0;
3785 /* get current thread */
3786 thread = rt_thread_self();
3787 RT_OBJECT_HOOK_CALL(rt_object_trytake_hook, (&(mq->parent.parent)));
3788
3789 level = rt_spin_lock_irqsave(&(mq->spinlock));
3790
3791 /* for non-blocking call */
3792 if (mq->entry == 0 && timeout == 0)
3793 {
3794 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3795
3796 return -RT_ETIMEOUT;
3797 }
3798
3799 /* message queue is empty */
3800 while (mq->entry == 0)
3801 {
3802 /* reset error number in thread */
3803 thread->error = -RT_EINTR;
3804
3805 /* no waiting, return timeout */
3806 if (timeout == 0)
3807 {
3808 /* unlock the queue and restore interrupt status */
3809 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3810
3811 thread->error = -RT_ETIMEOUT;
3812
3813 return -RT_ETIMEOUT;
3814 }
3815
3816 /* suspend current thread */
3817 ret = rt_thread_suspend_to_list(thread, &(mq->parent.suspend_thread),
3818 mq->parent.parent.flag, suspend_flag);
3819 if (ret != RT_EOK)
3820 {
3821 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3822 return ret;
3823 }
3824
3825 /* a finite timeout was given, start the thread timer */
3826 if (timeout > 0)
3827 {
3828 /* get the start tick of timer */
3829 tick_delta = rt_tick_get();
3830
3831 LOG_D("set thread:%s to timer list",
3832 thread->parent.name);
3833
3834 /* reset the timeout of thread timer and start it */
3835 rt_timer_control(&(thread->thread_timer),
3836 RT_TIMER_CTRL_SET_TIME,
3837 &timeout);
3838 rt_timer_start(&(thread->thread_timer));
3839 }
3840
3841 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3842
3843 /* re-schedule */
3844 rt_schedule();
3845
3846 /* woken up: check the wakeup reason before touching the queue */
3847 if (thread->error != RT_EOK)
3848 {
3849 /* return error */
3850 return thread->error;
3851 }
3852
3853 level = rt_spin_lock_irqsave(&(mq->spinlock));
3854
3855 /* if it's not waiting forever, re-calculate the timeout tick */
3856 if (timeout > 0)
3857 {
3858 tick_delta = rt_tick_get() - tick_delta;
3859 timeout -= tick_delta;
3860 if (timeout < 0)
3861 timeout = 0;
3862 }
3863 }
3864
3865 /* get message from queue */
3866 msg = (struct rt_mq_message *)mq->msg_queue_head;
3867
3868 /* move message queue head */
3869 mq->msg_queue_head = msg->next;
3870 /* reach queue tail, set to NULL */
3871 if (mq->msg_queue_tail == msg)
3872 mq->msg_queue_tail = RT_NULL;
3873
3874 /* decrease message entry */
3875 if (mq->entry > 0)
3876 {
3877 mq->entry--;
3878 }
3879
3880 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3881
3882 /* get real message length */
3883 len = ((struct rt_mq_message *)msg)->length;
3884
3885 if (len > size)
3886 len = size;
3887 /* copy message */
3888 rt_memcpy(buffer, GET_MESSAGEBYTE_ADDR(msg), len);
3889
3890#ifdef RT_USING_MESSAGEQUEUE_PRIORITY
3891 if (prio != RT_NULL)
3892 *prio = msg->prio;
3893#endif
3894 level = rt_spin_lock_irqsave(&(mq->spinlock));
3895 /* put message to free list */
3896 msg->next = (struct rt_mq_message *)mq->msg_queue_free;
3897 mq->msg_queue_free = msg;
3898
3899 /* resume a thread suspended while sending */
3900 if (!rt_list_isempty(&(mq->suspend_sender_thread)))
3901 {
3902 rt_susp_list_dequeue(&(mq->suspend_sender_thread), RT_EOK);
3903
3904 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3905
3906 RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(mq->parent.parent)));
3907
3908 rt_schedule();
3909
3910 return len;
3911 }
3912
3913 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
3914
3915 RT_OBJECT_HOOK_CALL(rt_object_take_hook, (&(mq->parent.parent)));
3916
3917 return len;
3918}
3919
3920rt_ssize_t rt_mq_recv(rt_mq_t mq,
3921 void *buffer,
3922 rt_size_t size,
3923 rt_int32_t timeout)
3924{
3925 return _rt_mq_recv(mq, buffer, size, 0, timeout, RT_UNINTERRUPTIBLE);
3926}
3927RTM_EXPORT(rt_mq_recv);
3928
3929rt_ssize_t rt_mq_recv_interruptible(rt_mq_t mq,
3930 void *buffer,
3931 rt_size_t size,
3932 rt_int32_t timeout)
3933{
3934 return _rt_mq_recv(mq, buffer, size, 0, timeout, RT_INTERRUPTIBLE);
3935}
3936RTM_EXPORT(rt_mq_recv_interruptible);
3937
3938rt_ssize_t rt_mq_recv_killable(rt_mq_t mq,
3939 void *buffer,
3940 rt_size_t size,
3941 rt_int32_t timeout)
3942{
3943 return _rt_mq_recv(mq, buffer, size, 0, timeout, RT_KILLABLE);
3944}
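These receive wrappers return the number of bytes actually copied (the stored message length, truncated to the caller's buffer size), so positive return values are payload lengths and negative values are error codes. A consumer-loop sketch; consumer_entry() and demo_mq are illustrative names:

static void consumer_entry(void *parameter)
{
    rt_uint32_t value;

    while (1)
    {
        /* block for up to 100 ticks waiting for the next message */
        rt_ssize_t len = rt_mq_recv(demo_mq, &value, sizeof(value), 100);

        if (len > 0)
            rt_kprintf("received %d bytes, value = %u\n", (int)len, (unsigned)value);
        else if (len == -RT_ETIMEOUT)
            continue; /* nothing arrived in this window */
        else
            break;    /* wait interrupted or queue reset */
    }
}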
3945#ifdef RT_USING_MESSAGEQUEUE_PRIORITY
3946rt_err_t rt_mq_send_wait_prio(rt_mq_t mq,
3947 const void *buffer,
3948 rt_size_t size,
3949 rt_int32_t prio,
3950 rt_int32_t timeout,
3951 int suspend_flag)
3952{
3953 return _rt_mq_send_wait(mq, buffer, size, prio, timeout, suspend_flag);
3954}
3955rt_ssize_t rt_mq_recv_prio(rt_mq_t mq,
3956 void *buffer,
3957 rt_size_t size,
3958 rt_int32_t *prio,
3959 rt_int32_t timeout,
3960 int suspend_flag)
3961{
3962 return _rt_mq_recv(mq, buffer, size, prio, timeout, suspend_flag);
3963}
3964#endif
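With RT_USING_MESSAGEQUEUE_PRIORITY enabled, the prio argument controls delivery order: the insertion loop in _rt_mq_send_wait() places a message ahead of the first queued node with a lower prio, so larger values are received first. Illustrative sketch (demo_mq as above):

#ifdef RT_USING_MESSAGEQUEUE_PRIORITY
rt_uint32_t low = 1, high = 2, out;
rt_int32_t out_prio;

rt_mq_send_wait_prio(demo_mq, &low,  sizeof(low),  1, RT_WAITING_FOREVER, RT_UNINTERRUPTIBLE);
rt_mq_send_wait_prio(demo_mq, &high, sizeof(high), 5, RT_WAITING_FOREVER, RT_UNINTERRUPTIBLE);

/* the prio-5 message was linked ahead of the prio-1 message */
rt_mq_recv_prio(demo_mq, &out, sizeof(out), &out_prio, RT_WAITING_FOREVER, RT_UNINTERRUPTIBLE);
/* expected: out == 2, out_prio == 5 */
#endif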
3980rt_err_t rt_mq_control(rt_mq_t mq, int cmd, void *arg)
3981{
3982 rt_base_t level;
3983 struct rt_mq_message *msg;
3984
3985 RT_UNUSED(arg);
3986
3987 /* parameter check */
3988 RT_ASSERT(mq != RT_NULL);
3989 RT_ASSERT(rt_object_get_type(&mq->parent.parent) == RT_Object_Class_MessageQueue);
3990
3991 if (cmd == RT_IPC_CMD_RESET)
3992 {
3993 level = rt_spin_lock_irqsave(&(mq->spinlock));
3994
3995 /* resume all threads waiting to receive */
3996 rt_susp_list_resume_all(&mq->parent.suspend_thread, -RT_ERROR);
3997 /* also resume all threads suspended on the queue's private sender list */
3998 rt_susp_list_resume_all(&(mq->suspend_sender_thread), -RT_ERROR);
3999
4000 /* release all message in the queue */
4001 while (mq->msg_queue_head != RT_NULL)
4002 {
4003 /* get message from queue */
4004 msg = (struct rt_mq_message *)mq->msg_queue_head;
4005
4006 /* move message queue head */
4007 mq->msg_queue_head = msg->next;
4008 /* reach queue tail, set to NULL */
4009 if (mq->msg_queue_tail == msg)
4010 mq->msg_queue_tail = RT_NULL;
4011
4012 /* put message to free list */
4013 msg->next = (struct rt_mq_message *)mq->msg_queue_free;
4014 mq->msg_queue_free = msg;
4015 }
4016
4017 /* reset the entry counter */
4018 mq->entry = 0;
4019
4020 rt_spin_unlock_irqrestore(&(mq->spinlock), level);
4021
4022 rt_schedule();
4023
4024 return RT_EOK;
4025 }
4026
4027 return -RT_ERROR;
4028}
4029RTM_EXPORT(rt_mq_control);
4030
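RT_IPC_CMD_RESET is the only command rt_mq_control() accepts for message queues: it wakes every waiting sender and receiver with -RT_ERROR and returns all queued message blocks to the free list. Illustrative call (demo_mq as above):

/* abandon all pending messages and kick every waiting thread */
if (rt_mq_control(demo_mq, RT_IPC_CMD_RESET, RT_NULL) != RT_EOK)
{
    rt_kprintf("reset failed\n");
}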
4032#endif /* RT_USING_MESSAGEQUEUE */