#define DBG_TAG "dfs.pcache"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>

#include <dfs_pcache.h>
#include <dfs_dentry.h>
#include <dfs_mnt.h>
#ifdef RT_USING_PAGECACHE

#include <mm_private.h>
#ifndef RT_PAGECACHE_COUNT
#define RT_PAGECACHE_COUNT          4096    /* system-wide cap on cached pages */
#endif

#ifndef RT_PAGECACHE_ASPACE_COUNT
#define RT_PAGECACHE_ASPACE_COUNT   1024    /* per-aspace page budget */
#endif

#ifndef RT_PAGECACHE_PRELOAD
#define RT_PAGECACHE_PRELOAD        4       /* pages read ahead on a cache miss */
#endif

#ifndef RT_PAGECACHE_GC_WORK_LEVEL
#define RT_PAGECACHE_GC_WORK_LEVEL  90      /* usage (%) that triggers background GC */
#endif

#ifndef RT_PAGECACHE_GC_STOP_LEVEL
#define RT_PAGECACHE_GC_STOP_LEVEL  70      /* usage (%) that GC reclaims down to */
#endif
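/*
 * Watermark arithmetic with the defaults above: the cache holds at most
 * RT_PAGECACHE_COUNT = 4096 pages. Once usage crosses 90% (4096 * 90 / 100
 * = 3686 pages) a GC request is queued; a GC pass then evicts pages until
 * usage falls back to 70% (2867 pages), so a single dfs_pcache_release(0)
 * call may reclaim roughly 800+ pages.
 */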
struct dfs_aspace_mmap_obj
{
    /* ... */
    struct rt_varea *varea;
};

struct dfs_pcache_mq_obj
{
    struct rt_mailbox *ack;
    rt_uint32_t cmd;    /* PCACHE_MQ_GC or PCACHE_MQ_WB */
};
static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos);
static void dfs_page_ref(struct dfs_page *page);
static int dfs_page_inactive(struct dfs_page *page);
static int dfs_page_remove(struct dfs_page *page);
static void dfs_page_release(struct dfs_page *page);
static int dfs_page_dirty(struct dfs_page *page);

static int dfs_aspace_release(struct dfs_aspace *aspace);

static int dfs_aspace_lock(struct dfs_aspace *aspace);
static int dfs_aspace_unlock(struct dfs_aspace *aspace);

static int dfs_pcache_lock(void);
static int dfs_pcache_unlock(void);
static struct dfs_pcache __pcache;
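/*
 * LRU bookkeeping: list_active and list_inactive are two head nodes threaded
 * onto one circular list, so the inactive segment runs from
 * list_inactive.next up to &list_active, and the active segment from
 * list_active.next up to &list_inactive. The traversals below rely on this
 * layout: eviction scans the inactive segment first and falls back to the
 * active one only if more pages are still needed. The same trick is used
 * both for aspaces on __pcache and for pages inside each aspace.
 */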
static int dfs_aspace_gc(struct dfs_aspace *aspace, int count)
{
    int cnt = count;

    if (aspace)
    {
        dfs_aspace_lock(aspace);

        if (aspace->pages_count > 0)
        {
            struct dfs_page *page = RT_NULL;
            rt_list_t *node = aspace->list_inactive.next;

            /* evict inactive pages first */
            while (cnt && node != &aspace->list_active)
            {
                page = rt_list_entry(node, struct dfs_page, space_node);
                node = node->next;
                if (dfs_page_remove(page) == 0)
                {
                    cnt--;
                }
            }

            /* then take active pages if the quota is not yet met */
            node = aspace->list_active.next;
            while (cnt && node != &aspace->list_inactive)
            {
                page = rt_list_entry(node, struct dfs_page, space_node);
                node = node->next;
                if (dfs_page_remove(page) == 0)
                {
                    cnt--;
                }
            }
        }

        dfs_aspace_unlock(aspace);
    }

    return count - cnt;    /* number of pages actually reclaimed */
}
void dfs_pcache_release(size_t count)
{
    rt_list_t *node = RT_NULL;
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();

    if (count == 0)
    {
        /* by default, reclaim down to the GC stop watermark */
        count = rt_atomic_load(&(__pcache.pages_count)) - RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_STOP_LEVEL / 100;
    }

    node = __pcache.list_inactive.next;
    while (count && node != &__pcache.list_active)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace)
        {
            count -= dfs_aspace_gc(aspace, count);
            dfs_aspace_release(aspace);
        }
    }

    node = __pcache.list_active.next;
    while (count && node != &__pcache.list_inactive)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace)
        {
            count -= dfs_aspace_gc(aspace, count);
        }
    }

    dfs_pcache_unlock();
}
static void _pcache_clean(struct dfs_mnt *mnt, int (*cb)(struct dfs_aspace *aspace))
{
    rt_list_t *node = RT_NULL;
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();

    node = __pcache.list_inactive.next;
    while (node != &__pcache.list_active)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace && aspace->mnt == mnt)
        {
            dfs_aspace_clean(aspace);
            cb(aspace);
        }
    }

    node = __pcache.list_active.next;
    while (node != &__pcache.list_inactive)
    {
        aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
        node = node->next;
        if (aspace && aspace->mnt == mnt)
        {
            dfs_aspace_clean(aspace);
            cb(aspace);
        }
    }

    dfs_pcache_unlock();
}
void dfs_pcache_unmount(struct dfs_mnt *mnt)
{
    _pcache_clean(mnt, dfs_aspace_release);
}

static int _dummy_cb(struct dfs_aspace *aspace)
{
    return 0;
}

void dfs_pcache_clean(struct dfs_mnt *mnt)
{
    _pcache_clean(mnt, _dummy_cb);
}
static int dfs_pcache_limit_check(void)
{
    int index = 4;    /* bound the number of reclaim passes */

    while (index && rt_atomic_load(&(__pcache.pages_count)) > RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
    {
        dfs_pcache_release(0);
        index--;
    }

    return 0;
}
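/*
 * The cache runs a single worker thread that consumes dfs_pcache_mq_obj
 * messages: PCACHE_MQ_GC asks it to shrink the cache back under the work
 * watermark, PCACHE_MQ_WB asks it to write one dirty page back to storage.
 * Keeping both off the caller's context makes dfs_aspace_read()/write()
 * latency independent of eviction and write-back cost.
 */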
static void dfs_pcache_thread(void *parameter)
{
    struct dfs_pcache_mq_obj work;

    while (1)
    {
        if (rt_mq_recv(__pcache.mqueue, &work, sizeof(work), RT_WAITING_FOREVER) == sizeof(work))
        {
            if (work.cmd == PCACHE_MQ_GC)
            {
                dfs_pcache_limit_check();
            }
            else if (work.cmd == PCACHE_MQ_WB)
            {
                rt_list_t *node;
                struct dfs_page *page = 0;

                /* pick the first aspace that has a dirty page */
                dfs_pcache_lock();
                rt_list_for_each(node, &__pcache.list_active)
                {
                    if (node != &__pcache.list_inactive)    /* skip the inactive list head */
                    {
                        struct dfs_aspace *aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
                        dfs_aspace_lock(aspace);
                        if (aspace->list_dirty.next != &aspace->list_dirty)
                        {
                            page = rt_list_entry(aspace->list_dirty.next, struct dfs_page, dirty_node);
                            dfs_page_ref(page);
                            dfs_aspace_unlock(aspace);
                            break;
                        }
                        dfs_aspace_unlock(aspace);
                    }
                }
                dfs_pcache_unlock();

                if (page)
                {
                    struct dfs_aspace *aspace = page->aspace;

                    dfs_aspace_lock(aspace);
                    if (page->is_dirty == 1 && aspace->vnode)
                    {
                        /* never write past the end of the file */
                        if (aspace->vnode->size < page->fpos + page->size)
                        {
                            page->len = aspace->vnode->size - page->fpos;
                        }
                        else
                        {
                            page->len = page->size;
                        }

                        if (aspace->ops->write)
                        {
                            aspace->ops->write(page);
                        }
                        page->is_dirty = 0;

                        if (page->dirty_node.next != RT_NULL)
                        {
                            rt_list_remove(&page->dirty_node);
                            page->dirty_node.next = RT_NULL;
                        }
                    }
                    dfs_page_release(page);
                    dfs_aspace_unlock(aspace);
                }
            }
        }
    }
}
static int dfs_pcache_init(void)
{
    rt_thread_t tid;

    for (int i = 0; i < RT_PAGECACHE_HASH_NR; i++)
    {
        rt_list_init(&__pcache.head[i]);
    }

    rt_list_init(&__pcache.list_active);
    rt_list_init(&__pcache.list_inactive);
    rt_list_insert_after(&__pcache.list_active, &__pcache.list_inactive);

    rt_atomic_store(&(__pcache.pages_count), 0);

    rt_mutex_init(&__pcache.lock, "pcache", RT_IPC_FLAG_PRIO);

    /* worker queue and thread for asynchronous GC / write-back */
    __pcache.mqueue = rt_mq_create("pcache", sizeof(struct dfs_pcache_mq_obj), 1024, RT_IPC_FLAG_FIFO);
    tid = rt_thread_create("pcache", dfs_pcache_thread, 0, 8192, 25, 5);    /* stack/priority values illustrative */
    if (tid)
    {
        rt_thread_startup(tid);
    }

    __pcache.last_time_wb = rt_tick_get_millisecond();

    return 0;
}
INIT_PREV_EXPORT(dfs_pcache_init);

static int dfs_pcache_mq_work(rt_uint32_t cmd)
{
    int err;
    struct dfs_pcache_mq_obj work = { 0 };

    work.cmd = cmd;

    err = rt_mq_send_wait(__pcache.mqueue, (const void *)&work, sizeof(struct dfs_pcache_mq_obj), 0);

    return err;
}
static int dfs_pcache_lock(void)
{
    rt_mutex_take(&__pcache.lock, RT_WAITING_FOREVER);
    return 0;
}

static int dfs_pcache_unlock(void)
{
    rt_mutex_release(&__pcache.lock);
    return 0;
}
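/*
 * Bucket selection below is the classic djb2 string hash (val * 33 + c)
 * folded with the mount pointer and masked into RT_PAGECACHE_HASH_NR
 * buckets (which therefore must be a power of two). E.g. for path "/a",
 * starting from the djb2 seed 5381: ((5381 << 5) + 5381) + '/' = 177620,
 * then ((177620 << 5) + 177620) + 'a' = 5861557, XORed with the mnt
 * pointer and masked.
 */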
static uint32_t dfs_aspace_hash(struct dfs_mnt *mnt, const char *path)
{
    uint32_t val = 5381;

    if (path)
    {
        while (*path)
        {
            val = ((val << 5) + val) + *path++;    /* val = val * 33 + c */
        }
    }

    return (val ^ (unsigned long)mnt) & (RT_PAGECACHE_HASH_NR - 1);
}
static struct dfs_aspace *dfs_aspace_hash_lookup(struct dfs_dentry *dentry, const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace = RT_NULL;
    uint32_t val = dfs_aspace_hash(dentry->mnt, dentry->pathname);

    dfs_pcache_lock();
    rt_list_for_each_entry(aspace, &__pcache.head[val], hash_node)
    {
        if (aspace->mnt == dentry->mnt
            && aspace->ops == ops
            && !strcmp(aspace->pathname, dentry->pathname))
        {
            rt_atomic_add(&aspace->ref_count, 1);
            dfs_pcache_unlock();
            return aspace;
        }
    }
    dfs_pcache_unlock();

    return RT_NULL;
}
static void dfs_aspace_insert(struct dfs_aspace *aspace)
{
    uint32_t val = dfs_aspace_hash(aspace->mnt, aspace->pathname);

    dfs_pcache_lock();
    rt_atomic_store(&(aspace->ref_count), 1);
    rt_list_insert_after(&__pcache.head[val], &aspace->hash_node);
    rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);    /* into the cache ring */
    dfs_pcache_unlock();
}
static void dfs_aspace_remove(struct dfs_aspace *aspace)
{
    dfs_pcache_lock();
    if (aspace->hash_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->hash_node);
    }
    if (aspace->cache_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->cache_node);
    }
    dfs_pcache_unlock();
}

static void dfs_aspace_active(struct dfs_aspace *aspace)
{
    dfs_pcache_lock();
    if (aspace->cache_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->cache_node);
        rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);    /* tail of active segment */
    }
    dfs_pcache_unlock();
}

static void dfs_aspace_inactive(struct dfs_aspace *aspace)
{
    dfs_pcache_lock();
    if (aspace->cache_node.next != RT_NULL)
    {
        rt_list_remove(&aspace->cache_node);
        rt_list_insert_before(&__pcache.list_active, &aspace->cache_node);    /* tail of inactive segment */
    }
    dfs_pcache_unlock();
}
static struct dfs_aspace *_dfs_aspace_create(struct dfs_dentry *dentry,
                                             struct dfs_vnode *vnode,
                                             const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace;

    aspace = rt_calloc(1, sizeof(struct dfs_aspace));
    if (aspace)
    {
        rt_list_init(&aspace->list_active);
        rt_list_init(&aspace->list_inactive);
        rt_list_init(&aspace->list_dirty);
        rt_list_insert_after(&aspace->list_active, &aspace->list_inactive);

        aspace->avl_root.root_node = 0;
        aspace->avl_page = 0;

        rt_mutex_init(&aspace->lock, "aspace", RT_IPC_FLAG_PRIO);

        aspace->pages_count = 0;
        aspace->vnode = vnode;
        aspace->ops = ops;

        if (dentry && dentry->mnt)
        {
            aspace->mnt = dentry->mnt;
            aspace->fullpath = dfs_dentry_full_path(dentry);
            aspace->pathname = rt_strdup(dentry->pathname);
        }

        dfs_aspace_insert(aspace);
    }

    return aspace;
}
struct dfs_aspace *dfs_aspace_create(struct dfs_dentry *dentry,
                                     struct dfs_vnode *vnode,
                                     const struct dfs_aspace_ops *ops)
{
    struct dfs_aspace *aspace = RT_NULL;

    dfs_pcache_lock();

    if (dentry)
    {
        aspace = dfs_aspace_hash_lookup(dentry, ops);
    }

    if (!aspace)
    {
        aspace = _dfs_aspace_create(dentry, vnode, ops);
    }
    else
    {
        /* reuse the cached aspace for this path */
        aspace->vnode = vnode;
        dfs_aspace_active(aspace);
    }

    dfs_pcache_unlock();

    return aspace;
}
int dfs_aspace_destroy(struct dfs_aspace *aspace)
{
    int ret = -EINVAL;

    if (aspace)
    {
        dfs_pcache_lock();
        dfs_aspace_lock(aspace);

        rt_atomic_sub(&(aspace->ref_count), 1);
        dfs_aspace_inactive(aspace);

        /* release frees the aspace when nothing references it anymore;
           if it stays alive, drop our lock again */
        if (dfs_aspace_release(aspace) != 0)
        {
            dfs_aspace_unlock(aspace);
        }

        dfs_pcache_unlock();
        ret = 0;
    }

    return ret;
}
static int dfs_aspace_release(struct dfs_aspace *aspace)
{
    int ret = -1;

    if (aspace)
    {
        dfs_pcache_lock();
        dfs_aspace_lock(aspace);

        if (rt_atomic_load(&aspace->ref_count) == 1 && aspace->pages_count == 0)
        {
            dfs_aspace_remove(aspace);
            if (aspace->fullpath)
            {
                rt_free(aspace->fullpath);
            }
            if (aspace->pathname)
            {
                rt_free(aspace->pathname);
            }
            rt_mutex_detach(&aspace->lock);
            rt_free(aspace);
            ret = 0;
        }
        else
        {
            dfs_aspace_unlock(aspace);
        }

        dfs_pcache_unlock();
    }

    return ret;
}
static int _dfs_aspace_dump(struct dfs_aspace *aspace, int is_dirty)
{
    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;

        dfs_aspace_lock(aspace);
        if (aspace->pages_count > 0)
        {
            rt_list_for_each(next, &aspace->list_inactive)
            {
                if (next != &aspace->list_active)    /* skip the active list head */
                {
                    page = rt_list_entry(next, struct dfs_page, space_node);
                    if (is_dirty && page->is_dirty)
                    {
                        rt_kprintf("    pages >> fpos: %d index: %d is_dirty: %d\n",
                                   page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
                    }
                    else if (is_dirty == 0)
                    {
                        rt_kprintf("    pages >> fpos: %d index: %d is_dirty: %d\n",
                                   page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
                    }
                }
            }
        }
        dfs_aspace_unlock(aspace);
    }
    return 0;
}
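/*
 * Shell helper (exported via MSH_CMD_EXPORT_ALIAS below): with no argument
 * it lists every cached file and its page count; `--dump` additionally
 * lists each cached page, and `--dirty` restricts that listing to dirty
 * pages.
 */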
static int dfs_pcache_dump(int argc, char **argv)
{
    int dump = 0;
    rt_list_t *node;
    struct dfs_aspace *aspace;

    if (argc >= 2)
    {
        if (strcmp(argv[1], "--dump") == 0)
        {
            dump = 1;
        }
        else if (strcmp(argv[1], "--dirty") == 0)
        {
            dump = 2;
        }
    }

    dfs_pcache_lock();

    rt_list_for_each(node, &__pcache.list_active)
    {
        if (node != &__pcache.list_inactive)    /* skip the inactive list head */
        {
            aspace = rt_list_entry(node, struct dfs_aspace, cache_node);

            if (aspace->fullpath)
            {
                rt_kprintf("file: %s%s pages: %d\n", aspace->fullpath, aspace->pathname, aspace->pages_count);
            }
            else
            {
                rt_kprintf("unknown type, pages: %d\n", aspace->pages_count);
            }

            if (dump > 0)
            {
                _dfs_aspace_dump(aspace, dump == 2 ? 1 : 0);
            }
        }
    }

    dfs_pcache_unlock();

    return 0;
}
MSH_CMD_EXPORT_ALIAS(dfs_pcache_dump, dfs_cache, dump dfs page cache);
static int dfs_page_unmap(struct dfs_page *page)
{
    rt_list_t *next;
    struct dfs_mmap *map;

    next = page->mmap_head.next;

    /* a page still mapped somewhere may carry user writes */
    if (next != &page->mmap_head && page->fpos < page->aspace->vnode->size)
    {
        dfs_page_dirty(page);
    }

    while (next != &page->mmap_head)
    {
        map = rt_list_entry(next, struct dfs_mmap, mmap_node);
        next = next->next;

        if (map)
        {
            rt_varea_t varea;
            void *vaddr;

            varea = rt_aspace_query(map->aspace, map->vaddr);
            vaddr = dfs_aspace_vaddr(varea, page->fpos);

            rt_varea_unmap_page(varea, vaddr);

            rt_list_remove(&map->mmap_node);
            rt_free(map);
        }
    }

    return 0;
}
static struct dfs_page *dfs_page_create(void)
{
    struct dfs_page *page = RT_NULL;

    page = rt_calloc(1, sizeof(struct dfs_page));
    if (page)
    {
        page->page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);    /* a single page (order 0) */
        if (page->page)
        {
            rt_memset(page->page, 0x00, ARCH_PAGE_SIZE);
            rt_list_init(&page->mmap_head);
            rt_atomic_store(&(page->ref_count), 1);
        }
        else
        {
            LOG_E("page alloc failed!\n");
            rt_free(page);
            page = RT_NULL;
        }
    }

    return page;
}
static void dfs_page_ref(struct dfs_page *page)
{
    rt_atomic_add(&(page->ref_count), 1);
}
static void dfs_page_release(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    rt_atomic_sub(&(page->ref_count), 1);

    if (rt_atomic_load(&(page->ref_count)) == 0)
    {
        dfs_page_unmap(page);

        /* flush a dirty page before its memory goes away */
        if (page->is_dirty == 1 && aspace->vnode)
        {
            if (aspace->vnode->size < page->fpos + page->size)
            {
                page->len = aspace->vnode->size - page->fpos;
            }
            else
            {
                page->len = page->size;
            }

            if (aspace->ops->write)
            {
                aspace->ops->write(page);
            }

            page->is_dirty = 0;
        }

        rt_pages_free(page->page, 0);
        page->page = RT_NULL;
        rt_free(page);
    }

    dfs_aspace_unlock(aspace);
}
/* compare a page-aligned file position against a page's start; 0 means "fpos falls in this page" */
static int dfs_page_compare(off_t fpos, off_t value)
{
    return fpos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE - value;
}
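/*
 * Each aspace keeps its pages in an AVL tree keyed by page-aligned file
 * position, so lookup by offset is O(log n); aspace->avl_page additionally
 * caches the most recently touched page, which lets sequential access hit
 * without descending the tree. The insert below is a standard binary-tree
 * descent followed by an AVL rebalance.
 */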
static int _dfs_page_insert(struct dfs_aspace *aspace, struct dfs_page *page)
{
    struct dfs_page *tmp;
    struct util_avl_struct *current = NULL;
    struct util_avl_struct **next = &(aspace->avl_root.root_node);

    /* find the insertion point */
    while (*next)
    {
        current = *next;
        tmp = rt_container_of(current, struct dfs_page, avl_node);

        if (page->fpos < tmp->fpos)
            next = &(current->avl_left);
        else if (page->fpos > tmp->fpos)
            next = &(current->avl_right);
        else
            return -1;    /* a page at this offset already exists */
    }

    /* link the new node and rebalance the tree */
    util_avl_link(&page->avl_node, current, next);
    util_avl_rebalance(current, &aspace->avl_root);
    aspace->avl_page = page;

    return 0;
}
static void _dfs_page_remove(struct dfs_aspace *aspace, struct dfs_page *page)
{
    if (aspace->avl_page && aspace->avl_page == page)
    {
        aspace->avl_page = 0;
    }

    util_avl_remove(&page->avl_node, &aspace->avl_root);
}
static int dfs_aspace_lock(struct dfs_aspace *aspace)
{
    rt_mutex_take(&aspace->lock, RT_WAITING_FOREVER);
    return 0;
}

static int dfs_aspace_unlock(struct dfs_aspace *aspace)
{
    rt_mutex_release(&aspace->lock);
    return 0;
}
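/*
 * Page state, as the guards in the helpers below imply: space_node links a
 * page into its aspace's active/inactive ring (space_node.next == RT_NULL
 * means "not in the cache"), and dirty_node links it into the aspace's
 * dirty list (dirty_node.next == RT_NULL means "clean"). Both are reset to
 * RT_NULL on removal precisely so these membership tests work.
 */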
static int dfs_page_insert(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    rt_list_insert_before(&aspace->list_inactive, &page->space_node);    /* tail of the active segment */
    aspace->pages_count++;

    if (_dfs_page_insert(aspace, page))
    {
        RT_ASSERT(0);
    }

    /* enforce the per-aspace budget by demoting the oldest active page */
    if (aspace->pages_count > RT_PAGECACHE_ASPACE_COUNT)
    {
        rt_list_t *next = aspace->list_active.next;

        if (next != &aspace->list_inactive)
        {
            struct dfs_page *tmp = rt_list_entry(next, struct dfs_page, space_node);
            dfs_page_inactive(tmp);
        }
    }

    rt_atomic_add(&(__pcache.pages_count), 1);

    dfs_aspace_unlock(aspace);

    return 0;
}
static int dfs_page_remove(struct dfs_page *page)
{
    int ret = -1;
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    /* only evict pages nobody else is holding (the cache owns one reference) */
    if (rt_atomic_load(&(page->ref_count)) == 1)
    {
        if (page->space_node.next != RT_NULL)
        {
            rt_list_remove(&page->space_node);
            page->space_node.next = RT_NULL;
            aspace->pages_count--;
            _dfs_page_remove(aspace, page);
        }
        if (page->dirty_node.next != RT_NULL)
        {
            rt_list_remove(&page->dirty_node);
            page->dirty_node.next = RT_NULL;
        }

        rt_atomic_sub(&(__pcache.pages_count), 1);

        dfs_page_release(page);
        ret = 0;
    }

    dfs_aspace_unlock(aspace);

    return ret;
}
static int dfs_page_active(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);
    if (page->space_node.next != RT_NULL)
    {
        rt_list_remove(&page->space_node);
        rt_list_insert_before(&aspace->list_inactive, &page->space_node);    /* tail of active segment */
    }
    dfs_aspace_unlock(aspace);

    return 0;
}

static int dfs_page_inactive(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);
    if (page->space_node.next != RT_NULL)
    {
        rt_list_remove(&page->space_node);
        rt_list_insert_before(&aspace->list_active, &page->space_node);    /* tail of inactive segment */
    }
    dfs_aspace_unlock(aspace);

    return 0;
}
static int dfs_page_dirty(struct dfs_page *page)
{
    struct dfs_aspace *aspace = page->aspace;

    dfs_aspace_lock(aspace);

    if (page->dirty_node.next == RT_NULL && page->space_node.next != RT_NULL)
    {
        rt_list_insert_before(&aspace->list_dirty, &page->dirty_node);
    }

    page->is_dirty = 1;

    /* nudge the worker thread so dirty pages don't linger too long */
    if (rt_tick_get_millisecond() - __pcache.last_time_wb >= 1000)    /* threshold illustrative */
    {
        dfs_pcache_mq_work(PCACHE_MQ_WB);
        __pcache.last_time_wb = rt_tick_get_millisecond();
    }

    dfs_aspace_unlock(aspace);

    return 0;
}
static struct dfs_page *dfs_page_search(struct dfs_aspace *aspace, off_t fpos)
{
    int cmp;
    struct dfs_page *page;
    struct util_avl_struct *avl_node;

    dfs_aspace_lock(aspace);

    /* fast path: the most recently touched page */
    if (aspace->avl_page && dfs_page_compare(fpos, aspace->avl_page->fpos) == 0)
    {
        page = aspace->avl_page;
        dfs_page_active(page);
        dfs_page_ref(page);
        dfs_aspace_unlock(aspace);
        return page;
    }

    avl_node = aspace->avl_root.root_node;
    while (avl_node)
    {
        page = rt_container_of(avl_node, struct dfs_page, avl_node);
        cmp = dfs_page_compare(fpos, page->fpos);

        if (cmp < 0)
        {
            avl_node = avl_node->avl_left;
        }
        else if (cmp > 0)
        {
            avl_node = avl_node->avl_right;
        }
        else
        {
            aspace->avl_page = page;
            dfs_page_active(page);
            dfs_page_ref(page);
            dfs_aspace_unlock(aspace);
            return page;
        }
    }

    dfs_aspace_unlock(aspace);

    return RT_NULL;
}
static struct dfs_page *dfs_aspace_load_page(struct dfs_file *file, off_t pos)
{
    struct dfs_page *page = RT_NULL;

    if (file && file->vnode)
    {
        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;

        page = dfs_page_create();
        if (page)
        {
            page->aspace = aspace;
            page->size = ARCH_PAGE_SIZE;
            page->fpos = pos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE;    /* page-aligned file offset */
            aspace->ops->read(file, page);
            dfs_page_ref(page);    /* one reference for the caller, on top of the cache's own */

            dfs_page_insert(page);
        }
    }

    return page;
}
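/*
 * dfs_page_lookup() below implements the read path's miss handling: on a
 * miss it loads the wanted page plus up to RT_PAGECACHE_PRELOAD further
 * pages of read-ahead, keeping a reference only on the first one, and then
 * checks the global watermarks: at 100% it reclaims synchronously, above
 * the work level it merely queues a PCACHE_MQ_GC message for the worker.
 */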
static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos)
{
    struct dfs_page *page = RT_NULL;
    struct dfs_aspace *aspace = file->vnode->aspace;

    dfs_aspace_lock(aspace);
    page = dfs_page_search(aspace, pos);
    if (!page)
    {
        int count = RT_PAGECACHE_PRELOAD;
        struct dfs_page *tmp = RT_NULL;
        off_t fpos = pos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE;

        do
        {
            page = dfs_aspace_load_page(file, fpos);
            if (tmp == RT_NULL)
            {
                tmp = page;    /* keep a reference on the page the caller asked for */
            }
            else if (page)
            {
                dfs_page_release(page);    /* read-ahead pages stay cached, unreferenced */
            }

            fpos += ARCH_PAGE_SIZE;
            page = dfs_page_search(aspace, fpos);
            if (page)
            {
                dfs_page_release(page);    /* next page already cached: stop preloading */
            }
            count--;
        } while (count && page == RT_NULL);

        page = tmp;
        dfs_aspace_unlock(aspace);

        if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT)
        {
            dfs_pcache_limit_check();
        }
        else if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
        {
            dfs_pcache_mq_work(PCACHE_MQ_GC);
        }

        return page;
    }

    dfs_aspace_unlock(aspace);

    return page;
}
int dfs_aspace_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
{
    int ret = -EINVAL;

    if (file && file->vnode && file->vnode->aspace)
    {
        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;

        struct dfs_page *page;
        char *ptr = (char *)buf;

        ret = 0;

        while (count)
        {
            page = dfs_page_lookup(file, *pos);
            if (!page)
            {
                break;
            }

            dfs_aspace_lock(aspace);

            /* bytes available in this page, clipped to EOF */
            off_t len;
            if (aspace->vnode->size < page->fpos + ARCH_PAGE_SIZE)
            {
                len = aspace->vnode->size - *pos;
            }
            else
            {
                len = page->fpos + ARCH_PAGE_SIZE - *pos;
            }

            len = count > len ? len : count;
            if (len <= 0)
            {
                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
                break;
            }

            rt_memcpy(ptr, (char *)page->page + *pos - page->fpos, len);
            ptr += len;
            *pos += len;
            count -= len;
            ret += len;

            dfs_page_release(page);
            dfs_aspace_unlock(aspace);
        }
    }

    return ret;
}
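/*
 * Write path: data lands in cache pages first. With O_SYNC the page is
 * written through to the backend immediately; otherwise it is only marked
 * dirty and the worker thread (or a later flush/eviction) performs the
 * write-back. A write extending past EOF grows vnode->size as it goes.
 */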
int dfs_aspace_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
{
    int ret = -EINVAL;

    if (file && file->vnode && file->vnode->aspace)
    {
        struct dfs_vnode *vnode = file->vnode;
        struct dfs_aspace *aspace = vnode->aspace;

        struct dfs_page *page;
        char *ptr = (char *)buf;

        if (!(aspace->ops->write))
        {
            return ret;    /* backend cannot accept write-back */
        }
        else if (aspace->mnt && (aspace->mnt->flags & MNT_RDONLY))
        {
            return -EROFS;
        }

        ret = 0;

        while (count)
        {
            page = dfs_page_lookup(file, *pos);
            if (!page)
            {
                break;
            }

            dfs_aspace_lock(aspace);

            off_t len = page->fpos + ARCH_PAGE_SIZE - *pos;
            len = count > len ? len : count;
            rt_memcpy((char *)page->page + *pos - page->fpos, ptr, len);
            ptr += len;
            *pos += len;
            count -= len;
            ret += len;

            if (*pos > aspace->vnode->size)
            {
                aspace->vnode->size = *pos;
            }

            if (file->flags & O_SYNC)
            {
                /* write-through: flush this page to the backend right away */
                if (aspace->vnode->size < page->fpos + page->size)
                {
                    page->len = aspace->vnode->size - page->fpos;
                }
                else
                {
                    page->len = page->size;
                }

                aspace->ops->write(page);
                page->is_dirty = 0;
            }
            else
            {
                dfs_page_dirty(page);
            }

            dfs_page_release(page);
            dfs_aspace_unlock(aspace);
        }
    }

    return ret;
}
int dfs_aspace_flush(struct dfs_aspace *aspace)
{
    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;

        dfs_aspace_lock(aspace);

        if (aspace->pages_count > 0 && aspace->vnode)
        {
            rt_list_for_each(next, &aspace->list_dirty)
            {
                page = rt_list_entry(next, struct dfs_page, dirty_node);
                if (page->is_dirty == 1 && aspace->vnode)
                {
                    if (aspace->vnode->size < page->fpos + page->size)
                    {
                        page->len = aspace->vnode->size - page->fpos;
                    }
                    else
                    {
                        page->len = page->size;
                    }

                    if (aspace->ops->write)
                    {
                        aspace->ops->write(page);
                    }

                    page->is_dirty = 0;
                }
            }
        }

        dfs_aspace_unlock(aspace);
    }
    return 0;
}
int dfs_aspace_clean(struct dfs_aspace *aspace)
{
    if (aspace)
    {
        dfs_aspace_lock(aspace);

        if (aspace->pages_count > 0)
        {
            rt_list_t *next = aspace->list_active.next;
            struct dfs_page *page;

            /* walk the whole ring, skipping the inactive list head */
            while (next && next != &aspace->list_active)
            {
                if (next == &aspace->list_inactive)
                {
                    next = next->next;
                    continue;
                }
                page = rt_list_entry(next, struct dfs_page, space_node);
                next = next->next;
                dfs_page_remove(page);
            }
        }

        dfs_aspace_unlock(aspace);
    }

    return 0;
}
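/*
 * mmap support: every mapping of a cache page into a user address space is
 * recorded as a struct dfs_mmap node (target rt_aspace plus virtual
 * address) on the page's mmap_head list. That record is what later lets
 * dfs_page_unmap()/dfs_aspace_unmap() find and tear down each hardware
 * mapping, and mark shared (non-private) mappings dirty on unmap.
 */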
void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
{
    void *ret = RT_NULL;
    struct dfs_page *page;
    struct dfs_aspace *aspace = file->vnode->aspace;
    rt_aspace_t target_aspace = varea->aspace;

    page = dfs_page_lookup(file, dfs_aspace_fpos(varea, vaddr));
    if (page)
    {
        struct dfs_mmap *map = (struct dfs_mmap *)rt_calloc(1, sizeof(struct dfs_mmap));
        if (map)
        {
            /* map the cache page's physical frame into the target space */
            void *pg_vaddr = page->page;
            void *pg_paddr = rt_kmem_v2p(pg_vaddr);
            int err = rt_varea_map_range(varea, vaddr, pg_paddr, page->size);
            if (err == RT_EOK)
            {
                ret = pg_vaddr;
                map->aspace = target_aspace;
                map->vaddr = vaddr;

                dfs_aspace_lock(aspace);
                rt_list_insert_after(&page->mmap_head, &map->mmap_node);
                dfs_page_release(page);
                dfs_aspace_unlock(aspace);
            }
            else
            {
                dfs_page_release(page);
                rt_free(map);
            }
        }
        else
        {
            dfs_page_release(page);
        }
    }

    return ret;
}
int dfs_aspace_unmap(struct dfs_file *file, struct rt_varea *varea)
{
    struct dfs_vnode *vnode = file->vnode;
    struct dfs_aspace *aspace = vnode->aspace;
    void *unmap_start = varea->start;
    void *unmap_end = (char *)unmap_start + varea->size;

    if (aspace)
    {
        rt_list_t *next;
        struct dfs_page *page;

        dfs_aspace_lock(aspace);
        if (aspace->pages_count > 0)
        {
            rt_list_for_each(next, &aspace->list_active)
            {
                if (next != &aspace->list_inactive)    /* skip the inactive list head */
                {
                    page = rt_list_entry(next, struct dfs_page, space_node);

                    rt_list_t *node;
                    struct dfs_mmap *map;
                    rt_varea_t map_varea = RT_NULL;

                    node = page->mmap_head.next;

                    while (node != &page->mmap_head)
                    {
                        rt_aspace_t map_aspace;

                        map = rt_list_entry(node, struct dfs_mmap, mmap_node);
                        node = node->next;

                        if (map && varea->aspace == map->aspace
                            && map->vaddr >= unmap_start && map->vaddr < unmap_end)
                        {
                            void *vaddr = map->vaddr;
                            map_aspace = map->aspace;

                            /* re-query only when the cached varea no longer covers vaddr */
                            if (!map_varea || map_varea->aspace != map_aspace ||
                                vaddr < map_varea->start ||
                                vaddr >= (void *)((char *)map_varea->start + map_varea->size))
                            {
                                map_varea = rt_aspace_query(map_aspace, vaddr);
                            }

                            rt_varea_unmap_page(map_varea, vaddr);

                            /* a shared mapping may have been written through; keep the data */
                            if (!rt_varea_is_private_locked(varea) &&
                                page->fpos < page->aspace->vnode->size)
                            {
                                dfs_page_dirty(page);
                            }

                            rt_list_remove(&map->mmap_node);
                            rt_free(map);
                        }
                    }
                }
            }
        }
        dfs_aspace_unlock(aspace);
    }

    return 0;
}
int dfs_aspace_page_unmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
{
    struct dfs_page *page;
    struct dfs_aspace *aspace = file->vnode->aspace;

    if (aspace)
    {
        dfs_aspace_lock(aspace);

        page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
        if (page)
        {
            rt_list_t *node;
            struct dfs_mmap *map;

            rt_varea_unmap_page(varea, vaddr);

            node = page->mmap_head.next;

            while (node != &page->mmap_head)
            {
                map = rt_list_entry(node, struct dfs_mmap, mmap_node);
                node = node->next;

                if (map && varea->aspace == map->aspace && vaddr == map->vaddr)
                {
                    if (!rt_varea_is_private_locked(varea))
                    {
                        dfs_page_dirty(page);
                    }
                    rt_list_remove(&map->mmap_node);
                    rt_free(map);
                    break;
                }
            }

            dfs_page_release(page);
        }

        dfs_aspace_unlock(aspace);
    }

    return 0;
}
int dfs_aspace_page_dirty(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
{
    struct dfs_page *page;
    struct dfs_aspace *aspace = file->vnode->aspace;

    if (aspace)
    {
        dfs_aspace_lock(aspace);

        page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
        if (page)
        {
            dfs_page_dirty(page);
            dfs_page_release(page);
        }

        dfs_aspace_unlock(aspace);
    }

    return 0;
}
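/*
 * Address translation helpers: varea->offset is the file offset of the
 * varea's first page, counted in pages. For a varea starting at
 * 0x30000000 with offset 2 (file offset 0x2000) and 4 KiB pages, vaddr
 * 0x30001234 maps to fpos 0x1234 + 0x2000 = 0x3234, and fpos 0x3000 maps
 * back to vaddr 0x30001000.
 */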
off_t dfs_aspace_fpos(struct rt_varea *varea, void *vaddr)
{
    return (off_t)(intptr_t)vaddr - (off_t)(intptr_t)varea->start + varea->offset * ARCH_PAGE_SIZE;
}

void *dfs_aspace_vaddr(struct rt_varea *varea, off_t fpos)
{
    return (char *)varea->start + fpos - varea->offset * ARCH_PAGE_SIZE;
}
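/*
 * Fault-driven I/O: when a page fault hits a file mapping, the MM layer
 * hands over an rt_aspace_io_msg carrying the faulting address and a
 * one-page buffer; the two wrappers below translate the address to a file
 * position and service the fault through the regular cached read/write
 * paths above.
 */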
int dfs_aspace_mmap_read(struct dfs_file *file, struct rt_varea *varea, void *data)
{
    int ret = 0;

    if (file && varea)
    {
        struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
        if (msg)
        {
            off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
            return dfs_aspace_read(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
        }
    }

    return ret;
}

int dfs_aspace_mmap_write(struct dfs_file *file, struct rt_varea *varea, void *data)
{
    int ret = 0;

    if (file && varea)
    {
        struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
        if (msg)
        {
            off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
            return dfs_aspace_write(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
        }
    }

    return ret;
}

#endif /* RT_USING_PAGECACHE */