RT-Thread RTOS 1.2.0
An open source embedded real-time operating system
dfs_pcache.c
Go to the documentation of this file.
1/*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2023-05-05 RTT Implement mnt in dfs v2.0
9 * 2023-10-23 Shell fix synchronization of data to icache
10 */
11
12#define DBG_TAG "dfs.pcache"
13#define DBG_LVL DBG_WARNING
14#include <rtdbg.h>
15
16#include <dfs_pcache.h>
17#include <dfs_dentry.h>
18#include <dfs_mnt.h>
19
20#include <rthw.h>
21
22#ifdef RT_USING_PAGECACHE
23
24#include <mm_page.h>
25#include <mm_private.h>
26#include <mmu.h>
27#include <tlb.h>
28
29#ifndef RT_PAGECACHE_COUNT
30#define RT_PAGECACHE_COUNT 4096
31#endif
32
33#ifndef RT_PAGECACHE_ASPACE_COUNT
34#define RT_PAGECACHE_ASPACE_COUNT 1024
35#endif
36
37#ifndef RT_PAGECACHE_PRELOAD
38#define RT_PAGECACHE_PRELOAD 4
39#endif
40
41#ifndef RT_PAGECACHE_GC_WORK_LEVEL
42#define RT_PAGECACHE_GC_WORK_LEVEL 90
43#endif
44
45#ifndef RT_PAGECACHE_GC_STOP_LEVEL
46#define RT_PAGECACHE_GC_STOP_LEVEL 70
47#endif
48
49#define PCACHE_MQ_GC 1
50#define PCACHE_MQ_WB 2
51
52struct dfs_aspace_mmap_obj
53{
54 rt_uint32_t cmd;
55 struct rt_mailbox *ack;
56 struct dfs_file *file;
57 struct rt_varea *varea;
58 void *data;
59};
60
61struct dfs_pcache_mq_obj
62{
63 struct rt_mailbox *ack;
64 rt_uint32_t cmd;
65};
66
67static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos);
68static void dfs_page_ref(struct dfs_page *page);
69static int dfs_page_inactive(struct dfs_page *page);
70static int dfs_page_remove(struct dfs_page *page);
71static void dfs_page_release(struct dfs_page *page);
72static int dfs_page_dirty(struct dfs_page *page);
73
74static int dfs_aspace_release(struct dfs_aspace *aspace);
75
76static int dfs_aspace_lock(struct dfs_aspace *aspace);
77static int dfs_aspace_unlock(struct dfs_aspace *aspace);
78
79static int dfs_pcache_lock(void);
80static int dfs_pcache_unlock(void);
81
82
83static struct dfs_pcache __pcache;
84
85
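/* Reclaim up to 'count' pages from one address space: scan its inactive
 * segment first, then its active segment, removing pages whose reference
 * count allows it. Returns the number of pages actually freed. */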
86static int dfs_aspace_gc(struct dfs_aspace *aspace, int count)
87{
88 int cnt = count;
89
90 if (aspace)
91 {
92 dfs_aspace_lock(aspace);
93
94 if (aspace->pages_count > 0)
95 {
96 struct dfs_page *page = RT_NULL;
97 rt_list_t *node = aspace->list_inactive.next;
98
99 while (cnt && node != &aspace->list_active)
100 {
101 page = rt_list_entry(node, struct dfs_page, space_node);
102 node = node->next;
103 if (dfs_page_remove(page) == 0)
104 {
105 cnt --;
106 }
107 }
108
109 node = aspace->list_active.next;
110 while (cnt && node != &aspace->list_inactive)
111 {
112 page = rt_list_entry(node, struct dfs_page, space_node);
113 node = node->next;
114 if (dfs_page_remove(page) == 0)
115 {
116 cnt --;
117 }
118 }
119 }
120
121 dfs_aspace_unlock(aspace);
122 }
123
124 return count - cnt;
125}
126
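/* Global page-cache reclaim. A 'count' of 0 means "shrink back to the
 * RT_PAGECACHE_GC_STOP_LEVEL watermark"; inactive address spaces are
 * scanned (and released when empty) before active ones. */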
127void dfs_pcache_release(size_t count)
128{
129 rt_list_t *node = RT_NULL;
130 struct dfs_aspace *aspace = RT_NULL;
131
132 dfs_pcache_lock();
133
134 if (count == 0)
135 {
136 count = rt_atomic_load(&(__pcache.pages_count)) - RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_STOP_LEVEL / 100;
137 }
138
139 node = __pcache.list_inactive.next;
140 while (count && node != &__pcache.list_active)
141 {
142 aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
143 node = node->next;
144 if (aspace)
145 {
146 count -= dfs_aspace_gc(aspace, count);
147 dfs_aspace_release(aspace);
148 }
149 }
150
151 node = __pcache.list_active.next;
152 while (count && node != &__pcache.list_inactive)
153 {
154 aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
155 node = node->next;
156 if (aspace)
157 {
158 count -= dfs_aspace_gc(aspace, count);
159 }
160 }
161
162 dfs_pcache_unlock();
163}
164
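/* Drop every cached page that belongs to the given mount point and run
 * 'cb' on each matching address space (dfs_aspace_release on unmount,
 * a no-op for dfs_pcache_clean). */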
165static void _pcache_clean(struct dfs_mnt *mnt, int (*cb)(struct dfs_aspace *aspace))
166{
167 rt_list_t *node = RT_NULL;
168 struct dfs_aspace *aspace = RT_NULL;
169
170 dfs_pcache_lock();
171
172 node = __pcache.list_inactive.next;
173 while (node != &__pcache.list_active)
174 {
175 aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
176 node = node->next;
177 if (aspace && aspace->mnt == mnt)
178 {
179 dfs_aspace_clean(aspace);
180 cb(aspace);
181 }
182 }
183
184 node = __pcache.list_active.next;
185 while (node != &__pcache.list_inactive)
186 {
187 aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
188 node = node->next;
189 if (aspace && aspace->mnt == mnt)
190 {
191 dfs_aspace_clean(aspace);
192 cb(aspace);
193 }
194 }
195
196 dfs_pcache_unlock();
197}
198
199void dfs_pcache_unmount(struct dfs_mnt *mnt)
200{
201 _pcache_clean(mnt, dfs_aspace_release);
202}
203
 204static int _dummy_cb(struct dfs_aspace *aspace)
205{
206 return 0;
207}
208
209void dfs_pcache_clean(struct dfs_mnt *mnt)
210{
211 _pcache_clean(mnt, _dummy_cb);
212}
213
214static int dfs_pcache_limit_check(void)
215{
216 int index = 4;
217
218 while (index && rt_atomic_load(&(__pcache.pages_count)) > RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
219 {
220 dfs_pcache_release(0);
221 index --;
222 }
223
224 return 0;
225}
226
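/* Background worker for the page cache. PCACHE_MQ_GC requests shrink the
 * cache towards the configured work level; PCACHE_MQ_WB requests write back
 * dirty pages that have been dirty for at least 500 ms, handling at most
 * four pages per request. */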
227static void dfs_pcache_thread(void *parameter)
228{
229 struct dfs_pcache_mq_obj work;
230
231 while (1)
232 {
233 if (rt_mq_recv(__pcache.mqueue, &work, sizeof(work), RT_WAITING_FOREVER) == sizeof(work))
234 {
235 if (work.cmd == PCACHE_MQ_GC)
236 {
237 dfs_pcache_limit_check();
238 }
239 else if (work.cmd == PCACHE_MQ_WB)
240 {
241 int count = 0;
242 rt_list_t *node;
243 struct dfs_page *page = 0;
244
245 while (1)
246 {
247 /* try to get dirty page */
248 dfs_pcache_lock();
249 page = 0;
250 rt_list_for_each(node, &__pcache.list_active)
251 {
252 if (node != &__pcache.list_inactive)
253 {
254 struct dfs_aspace *aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
255 dfs_aspace_lock(aspace);
256 if (aspace->list_dirty.next != &aspace->list_dirty)
257 {
258 page = rt_list_entry(aspace->list_dirty.next, struct dfs_page, dirty_node);
259 dfs_page_ref(page);
260 dfs_aspace_unlock(aspace);
261 break;
262 }
263 else
264 {
265 page = RT_NULL;
266 }
267 dfs_aspace_unlock(aspace);
268 }
269 }
270 dfs_pcache_unlock();
271
272 if (page)
273 {
274 struct dfs_aspace *aspace = page->aspace;
275
276 dfs_aspace_lock(aspace);
277 if (page->is_dirty == 1 && aspace->vnode)
278 {
279 if (rt_tick_get_millisecond() - page->tick_ms >= 500)
280 {
281 if (aspace->vnode->size < page->fpos + page->size)
282 {
283 page->len = aspace->vnode->size - page->fpos;
284 }
285 else
286 {
287 page->len = page->size;
288 }
289 if (aspace->ops->write)
290 {
291 aspace->ops->write(page);
292 }
293
294 page->is_dirty = 0;
295
296 if (page->dirty_node.next != RT_NULL)
297 {
298 rt_list_remove(&page->dirty_node);
299 page->dirty_node.next = RT_NULL;
300 }
301 }
302 }
303 dfs_page_release(page);
304 dfs_aspace_unlock(aspace);
305 }
306 else
307 {
308 break;
309 }
310
312
313 count ++;
314 if (count >= 4)
315 {
316 break;
317 }
318 }
319 }
320 }
321 }
322}
323
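/* One-shot initialization: hash buckets, the shared active/inactive list,
 * the cache mutex, the request message queue and the worker thread. */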
324static int dfs_pcache_init(void)
325{
326 rt_thread_t tid;
327
328 for (int i = 0; i < RT_PAGECACHE_HASH_NR; i++)
329 {
330 rt_list_init(&__pcache.head[i]);
331 }
332
333 rt_list_init(&__pcache.list_active);
334 rt_list_init(&__pcache.list_inactive);
335 rt_list_insert_after(&__pcache.list_active, &__pcache.list_inactive);
336
337 rt_atomic_store(&(__pcache.pages_count), 0);
338
339 rt_mutex_init(&__pcache.lock, "pcache", RT_IPC_FLAG_PRIO);
340
341 __pcache.mqueue = rt_mq_create("pcache", sizeof(struct dfs_pcache_mq_obj), 1024, RT_IPC_FLAG_FIFO);
342 tid = rt_thread_create("pcache", dfs_pcache_thread, 0, 8192, 25, 5);
343 if (tid)
344 {
 345 rt_thread_startup(tid);
 346 }
347
348 __pcache.last_time_wb = rt_tick_get_millisecond();
349
350 return 0;
351}
352INIT_PREV_EXPORT(dfs_pcache_init);
353
354static rt_ubase_t dfs_pcache_mq_work(rt_uint32_t cmd)
355{
356 rt_err_t err;
357 struct dfs_pcache_mq_obj work = { 0 };
358
359 work.cmd = cmd;
360
361 err = rt_mq_send_wait(__pcache.mqueue, (const void *)&work, sizeof(struct dfs_pcache_mq_obj), 0);
362
363 return err;
364}
365
366static int dfs_pcache_lock(void)
367{
368 rt_mutex_take(&__pcache.lock, RT_WAITING_FOREVER);
369 return 0;
370}
371
372static int dfs_pcache_unlock(void)
373{
374 rt_mutex_release(&__pcache.lock);
375 return 0;
376}
377
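/* djb2-style hash of the path, mixed with the mount pointer and masked to
 * the table size (RT_PAGECACHE_HASH_NR is expected to be a power of two). */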
378static uint32_t dfs_aspace_hash(struct dfs_mnt *mnt, const char *path)
379{
380 uint32_t val = 0;
381
382 if (path)
383 {
384 while (*path)
385 {
386 val = ((val << 5) + val) + *path++;
387 }
388 }
389
390 return (val ^ (unsigned long)mnt) & (RT_PAGECACHE_HASH_NR - 1);
391}
392
393static struct dfs_aspace *dfs_aspace_hash_lookup(struct dfs_dentry *dentry, const struct dfs_aspace_ops *ops)
394{
395 struct dfs_aspace *aspace = RT_NULL;
396
397 dfs_pcache_lock();
398 rt_list_for_each_entry(aspace, &__pcache.head[dfs_aspace_hash(dentry->mnt, dentry->pathname)], hash_node)
399 {
400
401 if (aspace->mnt == dentry->mnt
402 && aspace->ops == ops
403 && !strcmp(aspace->pathname, dentry->pathname))
404 {
405 rt_atomic_add(&aspace->ref_count, 1);
406 dfs_pcache_unlock();
407 return aspace;
408 }
409 }
410 dfs_pcache_unlock();
411
412 return RT_NULL;
413}
414
415static void dfs_aspace_insert(struct dfs_aspace *aspace)
416{
417 uint32_t val = 0;
418
419 val = dfs_aspace_hash(aspace->mnt, aspace->pathname);
420
421 dfs_pcache_lock();
422 rt_atomic_add(&aspace->ref_count, 1);
423 rt_list_insert_after(&__pcache.head[val], &aspace->hash_node);
424 rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);
425 dfs_pcache_unlock();
426}
427
428static void dfs_aspace_remove(struct dfs_aspace *aspace)
429{
430 dfs_pcache_lock();
431 if (aspace->hash_node.next != RT_NULL)
432 {
433 rt_list_remove(&aspace->hash_node);
434 }
435 if (aspace->cache_node.next != RT_NULL)
436 {
437 rt_list_remove(&aspace->cache_node);
438 }
439 dfs_pcache_unlock();
440}
441
442static void dfs_aspace_active(struct dfs_aspace *aspace)
443{
444 dfs_pcache_lock();
445 if (aspace->cache_node.next != RT_NULL)
446 {
447 rt_list_remove(&aspace->cache_node);
448 rt_list_insert_before(&__pcache.list_inactive, &aspace->cache_node);
449 }
450 dfs_pcache_unlock();
451}
452
453static void dfs_aspace_inactive(struct dfs_aspace *aspace)
454{
455 dfs_pcache_lock();
456 if (aspace->cache_node.next != RT_NULL)
457 {
458 rt_list_remove(&aspace->cache_node);
459 rt_list_insert_before(&__pcache.list_active, &aspace->cache_node);
460 }
461 dfs_pcache_unlock();
462}
463
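/* Allocate and initialize a new address space object, duplicate its mount
 * and path strings, then publish it in the hash table and the cache list. */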
464static struct dfs_aspace *_dfs_aspace_create(struct dfs_dentry *dentry,
465 struct dfs_vnode *vnode,
466 const struct dfs_aspace_ops *ops)
467{
468 struct dfs_aspace *aspace;
469
470 aspace = rt_calloc(1, sizeof(struct dfs_aspace));
471 if (aspace)
472 {
473 rt_list_init(&aspace->list_active);
474 rt_list_init(&aspace->list_inactive);
475 rt_list_init(&aspace->list_dirty);
476 rt_list_insert_after(&aspace->list_active, &aspace->list_inactive);
477
478 aspace->avl_root.root_node = 0;
479 aspace->avl_page = 0;
480
481 rt_mutex_init(&aspace->lock, rt_thread_self()->parent.name, RT_IPC_FLAG_PRIO);
482 rt_atomic_store(&aspace->ref_count, 1);
483
484 aspace->pages_count = 0;
485 aspace->vnode = vnode;
486 aspace->ops = ops;
487
488 if (dentry && dentry->mnt)
489 {
490 aspace->mnt = dentry->mnt;
491 aspace->fullpath = rt_strdup(dentry->mnt->fullpath);
492 aspace->pathname = rt_strdup(dentry->pathname);
493 }
494
495 dfs_aspace_insert(aspace);
496 }
497
498 return aspace;
499}
500
501struct dfs_aspace *dfs_aspace_create(struct dfs_dentry *dentry,
502 struct dfs_vnode *vnode,
503 const struct dfs_aspace_ops *ops)
504{
505 struct dfs_aspace *aspace = RT_NULL;
506
507 RT_ASSERT(vnode && ops);
508 dfs_pcache_lock();
509 if (dentry)
510 {
511 aspace = dfs_aspace_hash_lookup(dentry, ops);
512 }
513
514 if (!aspace)
515 {
516 aspace = _dfs_aspace_create(dentry, vnode, ops);
517 }
518 else
519 {
520 aspace->vnode = vnode;
521 dfs_aspace_active(aspace);
522 }
523 dfs_pcache_unlock();
524 return aspace;
525}
526
527int dfs_aspace_destroy(struct dfs_aspace *aspace)
528{
529 int ret = -EINVAL;
530
531 if (aspace)
532 {
533 dfs_pcache_lock();
534 dfs_aspace_lock(aspace);
535 rt_atomic_sub(&aspace->ref_count, 1);
536 RT_ASSERT(rt_atomic_load(&aspace->ref_count) > 0);
537 dfs_aspace_inactive(aspace);
538 aspace->vnode = RT_NULL;
539 if (dfs_aspace_release(aspace) != 0)
540 {
541 dfs_aspace_unlock(aspace);
542 }
543 dfs_pcache_unlock();
544 }
545
546 return ret;
547}
548
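/* Free an address space once it is unreferenced (ref_count == 1) and holds
 * no pages; returns 0 on success, -1 if it is still in use. */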
549static int dfs_aspace_release(struct dfs_aspace *aspace)
550{
551 int ret = -1;
552
553 if (aspace)
554 {
555 dfs_pcache_lock();
556 dfs_aspace_lock(aspace);
557
558 if (rt_atomic_load(&aspace->ref_count) == 1 && aspace->pages_count == 0)
559 {
560 dfs_aspace_remove(aspace);
561 if (aspace->fullpath)
562 {
563 rt_free(aspace->fullpath);
564 }
565 if (aspace->pathname)
566 {
567 rt_free(aspace->pathname);
568 }
569 rt_mutex_detach(&aspace->lock);
570 rt_free(aspace);
571 ret = 0;
572 }
573 else
574 {
575 dfs_aspace_unlock(aspace);
576 }
577 dfs_pcache_unlock();
578 }
579
580 return ret;
581}
582
583static int _dfs_aspace_dump(struct dfs_aspace *aspace, int is_dirty)
584{
585 if (aspace)
586 {
587 rt_list_t *next;
588 struct dfs_page *page;
589
590 dfs_aspace_lock(aspace);
591 if (aspace->pages_count > 0)
592 {
593 rt_list_for_each(next, &aspace->list_inactive)
594 {
595 if (next != &aspace->list_active)
596 {
597 page = rt_list_entry(next, struct dfs_page, space_node);
598 if (is_dirty && page->is_dirty)
599 {
600 rt_kprintf(" pages >> fpos: %d index :%d is_dirty: %d\n", page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
601 }
602 else if (is_dirty == 0)
603 {
604 rt_kprintf(" pages >> fpos: %d index :%d is_dirty: %d\n", page->fpos, page->fpos / ARCH_PAGE_SIZE, page->is_dirty);
605 }
606 }
607 }
608 }
609 else
610 {
611 rt_kprintf(" pages >> empty\n");
612 }
613 dfs_aspace_unlock(aspace);
614 }
615 return 0;
616}
617
618static int dfs_pcache_dump(int argc, char **argv)
619{
620 int dump = 0;
621 rt_list_t *node;
622 struct dfs_aspace *aspace;
623
624 if (argc == 2)
625 {
626 if (strcmp(argv[1], "--dump") == 0)
627 {
628 dump = 1;
629 }
630 else if (strcmp(argv[1], "--dirty") == 0)
631 {
632 dump = 2;
633 }
634 else
635 {
636 rt_kprintf("dfs page cache dump\n");
637 rt_kprintf("usage: dfs_cache\n");
638 rt_kprintf(" dfs_cache --dump\n");
639 rt_kprintf(" dfs_cache --dirty\n");
640 return 0;
641 }
642 }
643
644 dfs_pcache_lock();
645
646 rt_kprintf("total pages count: %d / %d\n", rt_atomic_load(&(__pcache.pages_count)), RT_PAGECACHE_COUNT);
647
648 rt_list_for_each(node, &__pcache.list_active)
649 {
650 if (node != &__pcache.list_inactive)
651 {
652 aspace = rt_list_entry(node, struct dfs_aspace, cache_node);
653
654 if (aspace->mnt)
655 {
656 rt_kprintf("file: %s%s pages: %d\n", aspace->fullpath, aspace->pathname, aspace->pages_count);
657 }
658 else
659 {
660 rt_kprintf("unknown type, pages: %d\n", aspace->pages_count);
661 }
662
663 if (dump > 0)
664 {
665 _dfs_aspace_dump(aspace, dump == 2 ? 1 : 0);
666 }
667 }
668 }
669
670 dfs_pcache_unlock();
671
672 return 0;
673}
674MSH_CMD_EXPORT_ALIAS(dfs_pcache_dump, dfs_cache, dump dfs page cache);
675
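/* Tear down every user mapping of a page: mark it dirty if it still backs
 * valid file data, unmap it from each recorded varea and free the mapping
 * records. */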
676static int dfs_page_unmap(struct dfs_page *page)
677{
678 rt_list_t *next;
679 struct dfs_mmap *map;
680
681 next = page->mmap_head.next;
682
683 if (next != &page->mmap_head && page->fpos < page->aspace->vnode->size)
684 {
685 dfs_page_dirty(page);
686 }
687
688 while (next != &page->mmap_head)
689 {
690 map = rt_list_entry(next, struct dfs_mmap, mmap_node);
691 next = next->next;
692
693 if (map)
694 {
695 rt_varea_t varea;
696 void *vaddr;
697
698 varea = rt_aspace_query(map->aspace, map->vaddr);
699 RT_ASSERT(varea);
700 vaddr = dfs_aspace_vaddr(varea, page->fpos);
701
702 rt_varea_unmap_page(varea, vaddr);
703
704 rt_free(map);
705 }
706 }
707
708 rt_list_init(&page->mmap_head);
709
710 return 0;
711}
712
713static struct dfs_page *dfs_page_create(void)
714{
715 struct dfs_page *page = RT_NULL;
716
717 page = rt_calloc(1, sizeof(struct dfs_page));
718 if (page)
719 {
720 page->page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
721 if (page->page)
722 {
723 //memset(page->page, 0x00, ARCH_PAGE_SIZE);
724 rt_list_init(&page->mmap_head);
725 rt_atomic_store(&(page->ref_count), 1);
726 }
727 else
728 {
729 LOG_E("page alloc failed!\n");
730 rt_free(page);
731 page = RT_NULL;
732 }
733 }
734
735 return page;
736}
737
738static void dfs_page_ref(struct dfs_page *page)
739{
740 rt_atomic_add(&(page->ref_count), 1);
741}
742
743static void dfs_page_release(struct dfs_page *page)
744{
745 struct dfs_aspace *aspace = page->aspace;
746
747 dfs_aspace_lock(aspace);
748
749 rt_atomic_sub(&(page->ref_count), 1);
750
751 if (rt_atomic_load(&(page->ref_count)) == 0)
752 {
753 dfs_page_unmap(page);
754
755 if (page->is_dirty == 1 && aspace->vnode)
756 {
757 if (aspace->vnode->size < page->fpos + page->size)
758 {
759 page->len = aspace->vnode->size - page->fpos;
760 }
761 else
762 {
763 page->len = page->size;
764 }
765 if (aspace->ops->write)
766 {
767 aspace->ops->write(page);
768 }
769 page->is_dirty = 0;
770 }
771 RT_ASSERT(page->is_dirty == 0);
772
773 rt_pages_free(page->page, 0);
774 page->page = RT_NULL;
775 rt_free(page);
776 }
777
778 dfs_aspace_unlock(aspace);
779}
780
781static int dfs_page_compare(off_t fpos, off_t value)
782{
783 return fpos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE - value;
784}
785
786static int _dfs_page_insert(struct dfs_aspace *aspace, struct dfs_page *page)
787{
788 struct dfs_page *tmp;
789 struct util_avl_struct *current = NULL;
790 struct util_avl_struct **next = &(aspace->avl_root.root_node);
791
792 /* Figure out where to put new node */
793 while (*next)
794 {
795 current = *next;
796 tmp = rt_container_of(current, struct dfs_page, avl_node);
797
798 if (page->fpos < tmp->fpos)
799 next = &(current->avl_left);
800 else if (page->fpos > tmp->fpos)
801 next = &(current->avl_right);
802 else
803 return -1;
804 }
805
806 /* Add new node and rebalance tree. */
807 util_avl_link(&page->avl_node, current, next);
808 util_avl_rebalance(current, &aspace->avl_root);
809 aspace->avl_page = page;
810
811 return 0;
812}
813
814static void _dfs_page_remove(struct dfs_aspace *aspace, struct dfs_page *page)
815{
816 if (aspace->avl_page && aspace->avl_page == page)
817 {
818 aspace->avl_page = 0;
819 }
820
821 util_avl_remove(&page->avl_node, &aspace->avl_root);
822}
823
824static int dfs_aspace_lock(struct dfs_aspace *aspace)
825{
826 rt_mutex_take(&aspace->lock, RT_WAITING_FOREVER);
827 return 0;
828}
829
830static int dfs_aspace_unlock(struct dfs_aspace *aspace)
831{
832 rt_mutex_release(&aspace->lock);
833 return 0;
834}
835
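/* Link a new page into the active segment of its address space and into the
 * per-aspace AVL tree; when the aspace exceeds RT_PAGECACHE_ASPACE_COUNT the
 * oldest active page is demoted to the inactive segment. */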
836static int dfs_page_insert(struct dfs_page *page)
837{
838 struct dfs_aspace *aspace = page->aspace;
839
840 dfs_aspace_lock(aspace);
841
842 rt_list_insert_before(&aspace->list_inactive, &page->space_node);
843 aspace->pages_count ++;
844
845 if (_dfs_page_insert(aspace, page))
846 {
847 RT_ASSERT(0);
848 }
849
850 if (aspace->pages_count > RT_PAGECACHE_ASPACE_COUNT)
851 {
852 rt_list_t *next = aspace->list_active.next;
853
854 if (next != &aspace->list_inactive)
855 {
856 struct dfs_page *tmp = rt_list_entry(next, struct dfs_page, space_node);
857 dfs_page_inactive(tmp);
858 }
859 }
860
861 rt_atomic_add(&(__pcache.pages_count), 1);
862
863 dfs_aspace_unlock(aspace);
864
865 return 0;
866}
867
868static int dfs_page_remove(struct dfs_page *page)
869{
870 int ret = -1;
871 struct dfs_aspace *aspace = page->aspace;
872
873 dfs_aspace_lock(aspace);
874
875 if (rt_atomic_load(&(page->ref_count)) == 1)
876 {
877 if (page->space_node.next != RT_NULL)
878 {
879 rt_list_remove(&page->space_node);
880 page->space_node.next = RT_NULL;
881 aspace->pages_count--;
882 _dfs_page_remove(aspace, page);
883 }
884 if (page->dirty_node.next != RT_NULL)
885 {
886 rt_list_remove(&page->dirty_node);
887 page->dirty_node.next = RT_NULL;
888 }
889
890 rt_atomic_sub(&(__pcache.pages_count), 1);
891
892 dfs_page_release(page);
893 ret = 0;
894 }
895
896 dfs_aspace_unlock(aspace);
897
898 return ret;
899}
900
901static int dfs_page_active(struct dfs_page *page)
902{
903 struct dfs_aspace *aspace = page->aspace;
904
905 dfs_aspace_lock(aspace);
906 if (page->space_node.next != RT_NULL)
907 {
908 rt_list_remove(&page->space_node);
909 rt_list_insert_before(&aspace->list_inactive, &page->space_node);
910 }
911 dfs_aspace_unlock(aspace);
912
913 return 0;
914}
915
916static int dfs_page_inactive(struct dfs_page *page)
917{
918 struct dfs_aspace *aspace = page->aspace;
919
920 dfs_aspace_lock(aspace);
921 if (page->space_node.next != RT_NULL)
922 {
923 rt_list_remove(&page->space_node);
924 rt_list_insert_before(&aspace->list_active, &page->space_node);
925 }
926 dfs_aspace_unlock(aspace);
927
928 return 0;
929}
930
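/* Mark a page dirty: queue it on the aspace dirty list (once), timestamp it,
 * and post a writeback request if none has been sent within the last second. */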
931static int dfs_page_dirty(struct dfs_page *page)
932{
933 struct dfs_aspace *aspace = page->aspace;
934
935 dfs_aspace_lock(aspace);
936
937 if (page->dirty_node.next == RT_NULL && page->space_node.next != RT_NULL)
938 {
939 rt_list_insert_before(&aspace->list_dirty, &page->dirty_node);
940 }
941
942 page->is_dirty = 1;
943 page->tick_ms = rt_tick_get_millisecond();
944
945 if (rt_tick_get_millisecond() - __pcache.last_time_wb >= 1000)
946 {
947 dfs_pcache_mq_work(PCACHE_MQ_WB);
948 __pcache.last_time_wb = rt_tick_get_millisecond();
949 }
950
951 dfs_aspace_unlock(aspace);
952
953 return 0;
954}
955
956static struct dfs_page *dfs_page_search(struct dfs_aspace *aspace, off_t fpos)
957{
958 int cmp;
959 struct dfs_page *page;
960 struct util_avl_struct *avl_node;
961
962 dfs_aspace_lock(aspace);
963
964 if (aspace->avl_page && dfs_page_compare(fpos, aspace->avl_page->fpos) == 0)
965 {
966 page = aspace->avl_page;
967 dfs_page_active(page);
968 dfs_page_ref(page);
969 dfs_aspace_unlock(aspace);
970 return page;
971 }
972
973 avl_node = aspace->avl_root.root_node;
974 while (avl_node)
975 {
976 page = rt_container_of(avl_node, struct dfs_page, avl_node);
977 cmp = dfs_page_compare(fpos, page->fpos);
978
979 if (cmp < 0)
980 {
981 avl_node = avl_node->avl_left;
982 }
983 else if (cmp > 0)
984 {
985 avl_node = avl_node->avl_right;
986 }
987 else
988 {
989 aspace->avl_page = page;
990 dfs_page_active(page);
991 dfs_page_ref(page);
992 dfs_aspace_unlock(aspace);
993 return page;
994 }
995 }
996
997 dfs_aspace_unlock(aspace);
998
999 return RT_NULL;
1000}
1001
1002static struct dfs_page *dfs_aspace_load_page(struct dfs_file *file, off_t pos)
1003{
1004 struct dfs_page *page = RT_NULL;
1005
1006 if (file && file->vnode && file->vnode->aspace)
1007 {
1008 struct dfs_vnode *vnode = file->vnode;
1009 struct dfs_aspace *aspace = vnode->aspace;
1010
1011 page = dfs_page_create();
1012 if (page)
1013 {
1014 page->aspace = aspace;
1015 page->size = ARCH_PAGE_SIZE;
1016 page->fpos = pos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE;
1017 aspace->ops->read(file, page);
1018 page->ref_count ++;
1019
1020 dfs_page_insert(page);
1021 }
1022 }
1023
1024 return page;
1025}
1026
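/* Find the cached page covering 'pos', taking a reference on it. On a miss,
 * up to RT_PAGECACHE_PRELOAD consecutive pages are read ahead and the first
 * one is returned; crossing the GC thresholds triggers reclaim (synchronously
 * at the hard limit, via the worker thread at the work level). */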
1027static struct dfs_page *dfs_page_lookup(struct dfs_file *file, off_t pos)
1028{
1029 struct dfs_page *page = RT_NULL;
1030 struct dfs_aspace *aspace = file->vnode->aspace;
1031
1032 dfs_aspace_lock(aspace);
1033 page = dfs_page_search(aspace, pos);
1034 if (!page)
1035 {
1036 int count = RT_PAGECACHE_PRELOAD;
1037 struct dfs_page *tmp = RT_NULL;
1038 off_t fpos = pos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE;
1039
1040 do
1041 {
1042 page = dfs_aspace_load_page(file, fpos);
1043 if (page)
1044 {
1045 if (tmp == RT_NULL)
1046 {
1047 tmp = page;
1048 }
1049 else
1050 {
1051 dfs_page_release(page);
1052 }
1053 }
1054 else
1055 {
1056 break;
1057 }
1058
1059 fpos += ARCH_PAGE_SIZE;
1060 page = dfs_page_search(aspace, fpos);
1061 if (page)
1062 {
1063 dfs_page_release(page);
1064 }
1065 count --;
1066
1067 } while (count && page == RT_NULL);
1068
1069 page = tmp;
1070 if (page)
1071 {
1072 dfs_aspace_unlock(aspace);
1073
1074 if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT)
1075 {
1076 dfs_pcache_limit_check();
1077 }
1078 else if (rt_atomic_load(&(__pcache.pages_count)) >= RT_PAGECACHE_COUNT * RT_PAGECACHE_GC_WORK_LEVEL / 100)
1079 {
1080 dfs_pcache_mq_work(PCACHE_MQ_GC);
1081 }
1082
1083 return page;
1084 }
1085 }
1086 dfs_aspace_unlock(aspace);
1087
1088 return page;
1089}
1090
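/* Read through the page cache: copy from cached pages into 'buf', page by
 * page, clamped to the vnode size; advances *pos and returns the byte count
 * (or -EINVAL when the backend has no read operation). */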
1091int dfs_aspace_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
1092{
1093 int ret = -EINVAL;
1094
1095 if (file && file->vnode && file->vnode->aspace)
1096 {
1097 if (!(file->vnode->aspace->ops->read))
1098 return ret;
1099 struct dfs_vnode *vnode = file->vnode;
1100 struct dfs_aspace *aspace = vnode->aspace;
1101
1102 struct dfs_page *page;
1103 char *ptr = (char *)buf;
1104
1105 ret = 0;
1106
1107 while (count)
1108 {
1109 page = dfs_page_lookup(file, *pos);
1110 if (page)
1111 {
1112 off_t len;
1113
1114 dfs_aspace_lock(aspace);
1115 if (aspace->vnode->size < page->fpos + ARCH_PAGE_SIZE)
1116 {
1117 len = aspace->vnode->size - *pos;
1118 }
1119 else
1120 {
1121 len = page->fpos + ARCH_PAGE_SIZE - *pos;
1122 }
1123
1124 len = count > len ? len : count;
1125 if (len > 0)
1126 {
1127 rt_memcpy(ptr, page->page + *pos - page->fpos, len);
1128 ptr += len;
1129 *pos += len;
1130 count -= len;
1131 ret += len;
1132 }
1133 else
1134 {
1135 dfs_page_release(page);
1136 dfs_aspace_unlock(aspace);
1137 break;
1138 }
1139 dfs_page_release(page);
1140 dfs_aspace_unlock(aspace);
1141 }
1142 else
1143 {
1144 break;
1145 }
1146 }
1147 }
1148
1149 return ret;
1150}
1151
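/* Write through the page cache: copy into cached pages, growing the vnode
 * size as needed. O_SYNC writes the page back immediately; otherwise the
 * page is only marked dirty. Read-only mounts are rejected with -EROFS. */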
1152int dfs_aspace_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
1153{
1154 int ret = -EINVAL;
1155
1156 if (file && file->vnode && file->vnode->aspace)
1157 {
1158 struct dfs_vnode *vnode = file->vnode;
1159 struct dfs_aspace *aspace = vnode->aspace;
1160
1161 struct dfs_page *page;
1162 char *ptr = (char *)buf;
1163
1164 if (!(aspace->ops->write))
1165 {
1166 return ret;
1167 }
1168 else if (aspace->mnt && (aspace->mnt->flags & MNT_RDONLY))
1169 {
1170 return -EROFS;
1171 }
1172
1173 ret = 0;
1174
1175 while (count)
1176 {
1177 page = dfs_page_lookup(file, *pos);
1178 if (page)
1179 {
1180 off_t len;
1181
1182 dfs_aspace_lock(aspace);
1183 len = page->fpos + ARCH_PAGE_SIZE - *pos;
1184 len = count > len ? len : count;
1185 rt_memcpy(page->page + *pos - page->fpos, ptr, len);
1186 ptr += len;
1187 *pos += len;
1188 count -= len;
1189 ret += len;
1190
1191 if (*pos > aspace->vnode->size)
1192 {
1193 aspace->vnode->size = *pos;
1194 }
1195
1196 if (file->flags & O_SYNC)
1197 {
1198 if (aspace->vnode->size < page->fpos + page->size)
1199 {
1200 page->len = aspace->vnode->size - page->fpos;
1201 }
1202 else
1203 {
1204 page->len = page->size;
1205 }
1206
1207 aspace->ops->write(page);
1208 page->is_dirty = 0;
1209 }
1210 else
1211 {
1212 dfs_page_dirty(page);
1213 }
1214
1215 dfs_page_release(page);
1216 dfs_aspace_unlock(aspace);
1217 }
1218 else
1219 {
1220 break;
1221 }
1222 }
1223 }
1224
1225 return ret;
1226}
1227
1228int dfs_aspace_flush(struct dfs_aspace *aspace)
1229{
1230 if (aspace)
1231 {
1232 rt_list_t *next;
1233 struct dfs_page *page;
1234
1235 dfs_aspace_lock(aspace);
1236
1237 if (aspace->pages_count > 0 && aspace->vnode)
1238 {
1239 rt_list_for_each(next, &aspace->list_dirty)
1240 {
1241 page = rt_list_entry(next, struct dfs_page, dirty_node);
1242 if (page->is_dirty == 1 && aspace->vnode)
1243 {
1244 if (aspace->vnode->size < page->fpos + page->size)
1245 {
1246 page->len = aspace->vnode->size - page->fpos;
1247 }
1248 else
1249 {
1250 page->len = page->size;
1251 }
1252
1253 if (aspace->ops->write)
1254 {
1255 aspace->ops->write(page);
1256 }
1257
1258 page->is_dirty = 0;
1259 }
1260 RT_ASSERT(page->is_dirty == 0);
1261 }
1262 }
1263
1264 dfs_aspace_unlock(aspace);
1265 }
1266 return 0;
1267}
1268
1269int dfs_aspace_clean(struct dfs_aspace *aspace)
1270{
1271 if (aspace)
1272 {
1273 dfs_aspace_lock(aspace);
1274
1275 if (aspace->pages_count > 0)
1276 {
1277 rt_list_t *next = aspace->list_active.next;
1278 struct dfs_page *page;
1279
1280 while (next && next != &aspace->list_active)
1281 {
1282 if (next == &aspace->list_inactive)
1283 {
1284 next = next->next;
1285 continue;
1286 }
1287 page = rt_list_entry(next, struct dfs_page, space_node);
1288 next = next->next;
1289 dfs_page_remove(page);
1290 }
1291 }
1292
1293 dfs_aspace_unlock(aspace);
1294 }
1295
1296 return 0;
1297}
1298
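/* Map the cached page that backs 'vaddr' into the requesting varea, record
 * the mapping on the page so it can be torn down or dirtied later, and return
 * the page's kernel virtual address (RT_NULL on failure). */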
1299void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
1300{
1301 void *ret = RT_NULL;
1302 struct dfs_page *page;
1303 struct dfs_aspace *aspace = file->vnode->aspace;
1304 rt_aspace_t target_aspace = varea->aspace;
1305
1306 page = dfs_page_lookup(file, dfs_aspace_fpos(varea, vaddr));
1307 if (page)
1308 {
1309 struct dfs_mmap *map = (struct dfs_mmap *)rt_calloc(1, sizeof(struct dfs_mmap));
1310 if (map)
1311 {
1312 void *pg_vaddr = page->page;
1313 void *pg_paddr = rt_kmem_v2p(pg_vaddr);
1314 int err = rt_varea_map_range(varea, vaddr, pg_paddr, page->size);
1315 if (err == RT_EOK)
1316 {
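/* Flush the dcache and invalidate the icache for the freshly mapped page so
 * that data written through the data cache becomes visible to instruction
 * fetches (see the 2023-10-23 change log entry on icache synchronization). */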
1330 rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, vaddr, ARCH_PAGE_SIZE);
1331 rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, vaddr, ARCH_PAGE_SIZE);
1332
1333 ret = pg_vaddr;
1334 map->aspace = target_aspace;
1335 map->vaddr = vaddr;
1336 dfs_aspace_lock(aspace);
1337 rt_list_insert_after(&page->mmap_head, &map->mmap_node);
1338 dfs_page_release(page);
1339 dfs_aspace_unlock(aspace);
1340 }
1341 else
1342 {
1343 dfs_page_release(page);
1344 rt_free(map);
1345 }
1346 }
1347 else
1348 {
1349 dfs_page_release(page);
1350 }
1351 }
1352
1353 return ret;
1354}
1355
1356int dfs_aspace_unmap(struct dfs_file *file, struct rt_varea *varea)
1357{
1358 struct dfs_vnode *vnode = file->vnode;
1359 struct dfs_aspace *aspace = vnode->aspace;
1360 void *unmap_start = varea->start;
1361 void *unmap_end = (char *)unmap_start + varea->size;
1362
1363 if (aspace)
1364 {
1365 rt_list_t *next;
1366 struct dfs_page *page;
1367
1368 dfs_aspace_lock(aspace);
1369 if (aspace->pages_count > 0)
1370 {
1371 rt_list_for_each(next, &aspace->list_active)
1372 {
1373 if (next != &aspace->list_inactive)
1374 {
1375 page = rt_list_entry(next, struct dfs_page, space_node);
1376 if (page)
1377 {
1378 rt_list_t *node, *tmp;
1379 struct dfs_mmap *map;
1380 rt_varea_t map_varea = RT_NULL;
1381
1382 node = page->mmap_head.next;
1383
1384 while (node != &page->mmap_head)
1385 {
1386 rt_aspace_t map_aspace;
1387 map = rt_list_entry(node, struct dfs_mmap, mmap_node);
1388 tmp = node;
1389 node = node->next;
1390
1391 if (map && varea->aspace == map->aspace
1392 && map->vaddr >= unmap_start && map->vaddr < unmap_end)
1393 {
1394 void *vaddr = map->vaddr;
1395 map_aspace = map->aspace;
1396
1397 if (!map_varea || map_varea->aspace != map_aspace ||
1398 vaddr < map_varea->start ||
1399 vaddr >= map_varea->start + map_varea->size)
1400 {
1401 /* lock the tree so we don't access uncompleted data */
1402 map_varea = rt_aspace_query(map_aspace, vaddr);
1403 }
1404
1405 rt_varea_unmap_page(map_varea, vaddr);
1406
1407 if (!rt_varea_is_private_locked(varea) &&
1408 page->fpos < page->aspace->vnode->size)
1409 {
1410 dfs_page_dirty(page);
1411 }
1412 rt_list_remove(tmp);
1413 rt_free(map);
1414 break;
1415 }
1416 }
1417 }
1418 }
1419 }
1420 }
1421 dfs_aspace_unlock(aspace);
1422 }
1423
1424 return 0;
1425}
1426
1427int dfs_aspace_page_unmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
1428{
1429 struct dfs_page *page;
1430 struct dfs_aspace *aspace = file->vnode->aspace;
1431
1432 if (aspace)
1433 {
1434 dfs_aspace_lock(aspace);
1435
1436 page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
1437 if (page)
1438 {
1439 rt_list_t *node, *tmp;
1440 struct dfs_mmap *map;
1441 rt_varea_unmap_page(varea, vaddr);
1442
1443 node = page->mmap_head.next;
1444
1445 while (node != &page->mmap_head)
1446 {
1447 map = rt_list_entry(node, struct dfs_mmap, mmap_node);
1448 tmp = node;
1449 node = node->next;
1450
1451 if (map && varea->aspace == map->aspace && vaddr == map->vaddr)
1452 {
1453 if (!rt_varea_is_private_locked(varea))
1454 {
1455 dfs_page_dirty(page);
1456 }
1457 rt_list_remove(tmp);
1458 rt_free(map);
1459 break;
1460 }
1461 }
1462
1463 dfs_page_release(page);
1464 }
1465
1466 dfs_aspace_unlock(aspace);
1467 }
1468
1469 return 0;
1470}
1471
1472int dfs_aspace_page_dirty(struct dfs_file *file, struct rt_varea *varea, void *vaddr)
1473{
1474 struct dfs_page *page;
1475 struct dfs_aspace *aspace = file->vnode->aspace;
1476
1477 if (aspace)
1478 {
1479 dfs_aspace_lock(aspace);
1480
1481 page = dfs_page_search(aspace, dfs_aspace_fpos(varea, vaddr));
1482 if (page)
1483 {
1484 dfs_page_dirty(page);
1485 dfs_page_release(page);
1486 }
1487
1488 dfs_aspace_unlock(aspace);
1489 }
1490
1491 return 0;
1492}
1493
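/* Translate between a user virtual address inside a varea and the
 * corresponding file position, using the varea's page-granular offset. */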
1494off_t dfs_aspace_fpos(struct rt_varea *varea, void *vaddr)
1495{
1496 return (off_t)(intptr_t)vaddr - (off_t)(intptr_t)varea->start + varea->offset * ARCH_PAGE_SIZE;
1497}
1498
1499void *dfs_aspace_vaddr(struct rt_varea *varea, off_t fpos)
1500{
1501 return varea->start + fpos - varea->offset * ARCH_PAGE_SIZE;
1502}
1503
1504int dfs_aspace_mmap_read(struct dfs_file *file, struct rt_varea *varea, void *data)
1505{
1506 int ret = 0;
1507
1508 if (file && varea)
1509 {
1510 struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
1511 if (msg)
1512 {
1513 off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
1514 return dfs_aspace_read(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
1515 }
1516 }
1517
1518 return ret;
1519}
1520
1521int dfs_aspace_mmap_write(struct dfs_file *file, struct rt_varea *varea, void *data)
1522{
1523 int ret = 0;
1524
1525 if (file && varea)
1526 {
1527 struct rt_aspace_io_msg *msg = (struct rt_aspace_io_msg *)data;
1528 if (msg)
1529 {
1530 off_t fpos = dfs_aspace_fpos(varea, msg->fault_vaddr);
1531 return dfs_aspace_write(file, msg->buffer_vaddr, ARCH_PAGE_SIZE, &fpos);
1532 }
1533 }
1534
1535 return ret;
1536}
1537
1538#endif