RT-Thread RTOS 1.2.0
An open source embedded real-time operating system
Loading...
Searching...
Not found
dfs_file_mmap.c
Browse the documentation for this file.
1/*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 */
9
10#include "dfs_file.h"
11#include "dfs_dentry.h"
12#include "dfs_mnt.h"
13
14#define DBG_TAG "dfs.mmap"
15#define DBG_LVL DBG_WARNING
16#include <rtdbg.h>
17
18#if defined(RT_USING_SMART) && defined(ARCH_MM_MMU) && defined(RT_USING_PAGECACHE)
19
20#include "dfs_pcache.h"
21
22#include <lwp.h>
23
24#include <sys/mman.h>
25
26#include <lwp_user_mm.h>
27#include <mm_aspace.h>
28#include <mm_fault.h>
29#include <mm_flag.h>
30#include <mm_page.h>
31#include <mmu.h>
32#include <page.h>
33#include <tlb.h>
34
35static rt_mem_obj_t dfs_get_mem_obj(struct dfs_file *file);
36static void *dfs_mem_obj_get_file(rt_mem_obj_t mem_obj);
37
38static void *_do_mmap(struct rt_lwp *lwp, void *map_vaddr, size_t map_size, size_t attr,
39 mm_flag_t flags, off_t pgoffset, void *data, rt_err_t *code)
40{
41 int ret = 0;
42 void *vaddr = map_vaddr;
43 rt_mem_obj_t mem_obj = dfs_get_mem_obj(data);
44
45 ret = rt_aspace_map(lwp->aspace, &vaddr, map_size,
46 attr, flags, mem_obj, pgoffset);
47 if (ret != RT_EOK)
48 {
49 vaddr = RT_NULL;
50 LOG_E("failed to map %lx with size %lx with errno %d", map_vaddr,
51 map_size, ret);
52 }
53
54 if (code)
55 {
56 *code = ret;
57 }
58
59 return vaddr;
60}
61
/**
 * Page-align the user-requested mapping and create it in the lwp's
 * address space.
 *
 * The requested [addr, addr + length) range is widened to whole pages:
 * the start address is rounded down and the size rounded up, so the
 * original range stays covered.
 *
 * @param mmap2 user mmap2 arguments (addr/length/prot/flags/pgoffset/lwp)
 * @param data  the struct dfs_file * backing the mapping
 * @param code  optional out-parameter receiving the mapping result code
 *
 * @return the mapped virtual address; RT_NULL when _do_mmap failed;
 *         mmap2->addr unchanged (and *code untouched) when length is 0
 */
static void *_map_data_to_uspace(struct dfs_mmap2_args *mmap2, void *data, rt_err_t *code)
{
    size_t offset = 0;
    void *map_vaddr = mmap2->addr;
    size_t map_size = mmap2->length;
    struct rt_lwp *lwp = mmap2->lwp;
    rt_size_t k_attr;
    rt_size_t k_flags;

    if (map_size)
    {
        /* offset of addr inside its page; fold it into the size, then
         * round the size up and the start address down to page bounds */
        offset = (size_t)map_vaddr & ARCH_PAGE_MASK;
        map_size += (offset + ARCH_PAGE_SIZE - 1);
        map_size &= ~ARCH_PAGE_MASK;
        map_vaddr = (void *)((size_t)map_vaddr & ~ARCH_PAGE_MASK);

        /* translate user-visible prot/flags into kernel attr/flags */
        k_flags = lwp_user_mm_flag_to_kernel(mmap2->flags);
        k_attr = lwp_user_mm_attr_to_kernel(mmap2->prot);

        map_vaddr = _do_mmap(lwp, map_vaddr, map_size, k_attr, k_flags, mmap2->pgoffset, data, code);
    }

    return map_vaddr;
}
86
/* rt_mem_obj hint_free callback: intentionally a no-op — this object has
 * nothing to release for a va hint. */
static void hint_free(rt_mm_va_hint_t hint)
{
}
90
/**
 * rt_mem_obj page-fault callback: resolve a fault inside a file-backed
 * varea by mapping the corresponding page from the file's page cache.
 *
 * On success the response is filled with MM_FAULT_STATUS_OK_MAPPED and
 * one ARCH_PAGE_SIZE page; on failure the response is left untouched so
 * the fault propagates to the caller.
 */
static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
{
    void *page;
    struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);

    if (file)
    {
        LOG_I("%s varea: %p", __func__, varea);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              varea->start, varea->size, varea->offset, varea->attr, varea->flag);
        LOG_I("fault vaddr: %p", msg->fault_vaddr);

        if (file->dentry)
        {
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
        }

        /* look up (or load) the cache page backing the faulting address */
        page = dfs_aspace_mmap(file, varea, msg->fault_vaddr);
        if (page)
        {
            msg->response.status = MM_FAULT_STATUS_OK_MAPPED;
            msg->response.size = ARCH_PAGE_SIZE;
            msg->response.vaddr = page;
        }
        else
        {
            LOG_E("%s varea %p mmap failed at vaddr %p", __func__, varea, msg->fault_vaddr);
        }
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
    }
}
125
126/* do pre open bushiness like inc a ref */
127static void on_varea_open(struct rt_varea *varea)
128{
129 struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
130 varea->data = RT_NULL;
131 rt_atomic_add(&(file->ref_count), 1);
132}
133
/* Do post-close business such as dropping a reference on the backing file. */
static void on_varea_close(struct rt_varea *varea)
{
    struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);

    if (file)
    {
        LOG_I("%s varea: %p", __func__, varea);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              varea->start, varea->size, varea->offset, varea->attr, varea->flag);

        if (file->dentry)
        {
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
        }

        /* drop the pages this varea pinned in the file's page cache */
        dfs_aspace_unmap(file, varea);

        /* NOTE(review): this load-then-act on ref_count is not atomic as a
         * whole; two racing closers could both read a value > 1 and neither
         * call dfs_file_close — presumably an outer lock serializes varea
         * open/close, TODO confirm */
        if (rt_atomic_load(&(file->ref_count)) == 1)
        {
            /* last reference: close the file itself */
            dfs_file_close(file);
        }
        else
        {
            rt_atomic_sub(&(file->ref_count), 1);
        }
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
    }
}
167
168static const char *get_name(rt_varea_t varea)
169{
170 struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
171
172 return (file && file->dentry) ? file->dentry->pathname : "file-mapper";
173}
174
175void page_read(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
176{
177 rt_ubase_t ret;
178 struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
179
180 if (file)
181 {
182 LOG_I("%s varea: %p", __func__, varea);
183 LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
184 varea->start, varea->size, varea->offset, varea->attr, varea->flag);
185
186 ret = dfs_aspace_mmap_read(file, varea, msg);
187 if (ret >= 0)
188 {
189 msg->response.status = MM_FAULT_STATUS_OK;
190 if (ret < ARCH_PAGE_SIZE)
191 {
192 memset((char *)msg->buffer_vaddr + ret, 0, ARCH_PAGE_SIZE - ret);
193 }
194 }
195 }
196 else
197 {
198 LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
199 }
200}
201
202void page_write(struct rt_varea *varea, struct rt_aspace_io_msg *msg)
203{
204 rt_ubase_t ret;
205 struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
206
207 if (file)
208 {
209 LOG_I("%s varea: %p", __func__, varea);
210 LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
211 varea->start, varea->size, varea->offset, varea->attr, varea->flag);
212
213 ret = dfs_aspace_mmap_write(file, varea, msg);
214 if (ret > 0)
215 {
216 msg->response.status = MM_FAULT_STATUS_OK;
217 if (ret < ARCH_PAGE_SIZE)
218 {
219 memset((char *)msg->buffer_vaddr + ret, 0, ARCH_PAGE_SIZE - ret);
220 }
221 }
222 }
223 else
224 {
225 LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
226 }
227}
228
229static rt_err_t unmap_pages(rt_varea_t varea, void *rm_start, void *rm_end)
230{
231 struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
232
233 if (file)
234 {
235 LOG_I("%s varea: %p start: %p end: %p", __func__, varea, rm_start, rm_end);
236
237 RT_ASSERT(!((rt_ubase_t)rm_start & ARCH_PAGE_MASK));
238 RT_ASSERT(!((rt_ubase_t)rm_end & ARCH_PAGE_MASK));
239 while (rm_start != rm_end)
240 {
241 dfs_aspace_page_unmap(file, varea, rm_start);
242 rm_start += ARCH_PAGE_SIZE;
243 }
244
245 return RT_EOK;
246 }
247 else
248 {
249 LOG_E("%s varea %p not a file, vaddr %p", __func__, varea, varea->start);
250 }
251
252 return -RT_ERROR;
253}
254
/**
 * rt_mem_obj shrink callback: release the page-cache pages of the part
 * of @varea that is being trimmed away.
 *
 * The varea shrinks to [new_vaddr, new_vaddr + size); depending on which
 * end is kept, the removed span is either the tail or the head of the
 * old range.
 *
 * @return result of unmap_pages() over the removed span
 */
rt_err_t on_varea_shrink(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    char *varea_start = varea->start;
    void *rm_start;
    void *rm_end;

    LOG_I("%s varea: %p", __func__, varea);
    LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
          varea->start, varea->size, varea->offset, varea->attr, varea->flag);
    LOG_I("new_vaddr: %p size: %p", new_vaddr, size);

    if (varea_start == (char *)new_vaddr)
    {
        /* start kept: remove the tail [start + size, start + old_size) */
        rm_start = varea_start + size;
        rm_end = varea_start + varea->size;
    }
    else
    {
        /* end kept: remove the head [start, new_vaddr) */
        rm_start = varea_start;
        rm_end = new_vaddr;
    }

    return unmap_pages(varea, rm_start, rm_end);
}
279
/**
 * rt_mem_obj expand callback: always accepts the expansion with no work
 * of its own — presumably the new range is populated on demand via
 * on_page_fault (TODO confirm against the aspace layer's contract).
 */
rt_err_t on_varea_expand(struct rt_varea *varea, void *new_vaddr, rt_size_t size)
{
    LOG_I("%s varea: %p", __func__, varea);
    LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
          varea->start, varea->size, varea->offset, varea->attr, varea->flag);
    LOG_I("new_vaddr: %p size: %p", new_vaddr, size);

    return RT_EOK;
}
289
/**
 * rt_mem_obj split callback: unmapping [unmap_start, unmap_start +
 * unmap_len) in the middle of @existed splits it, producing @subset.
 *
 * Pages of both the unmapped gap and the subset's range are dropped from
 * the page table, then the subset takes its own file reference via
 * on_varea_open().
 *
 * @return RT_EOK on success; -RT_ERROR when not file backed or an unmap
 *         step failed
 */
rt_err_t on_varea_split(struct rt_varea *existed, void *unmap_start, rt_size_t unmap_len, struct rt_varea *subset)
{
    rt_err_t rc;
    struct dfs_file *file = dfs_mem_obj_get_file(existed->mem_obj);

    if (file)
    {
        LOG_I("%s varea: %p", __func__, existed);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              existed->start, existed->size, existed->offset, existed->attr, existed->flag);
        LOG_I("unmap_start: %p unmap_len: %p", unmap_start, unmap_len);

        if (file->dentry)
        {
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
        }

        /* drop the pages of the gap being unmapped ... */
        rc = unmap_pages(existed, unmap_start, (char *)unmap_start + unmap_len);
        if (!rc)
        {
            /* ... and of the subset range (it re-faults its pages later) */
            rc = unmap_pages(existed, subset->start, (char *)subset->start + subset->size);
            if (!rc)
                on_varea_open(subset);
        }

        return rc;
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, existed, existed->start);
    }

    return -RT_ERROR;
}
324
/**
 * rt_mem_obj merge callback: @merge_from is absorbed into @merge_to;
 * release merge_from's page-cache pages and its file reference.
 *
 * NOTE(review): on_varea_close() also calls dfs_aspace_unmap() for the
 * same varea, so the unmap here appears to run twice — presumably
 * dfs_aspace_unmap() tolerates that; TODO confirm.
 *
 * @return RT_EOK on success, -RT_ERROR when merge_from is not file backed
 */
rt_err_t on_varea_merge(struct rt_varea *merge_to, struct rt_varea *merge_from)
{
    struct dfs_file *file = dfs_mem_obj_get_file(merge_from->mem_obj);

    if (file)
    {
        LOG_I("%s varea: %p", __func__, merge_from);
        LOG_I("varea start: %p size: 0x%x offset: 0x%x attr: 0x%x flag: 0x%x",
              merge_from->start, merge_from->size, merge_from->offset, merge_from->attr, merge_from->flag);

        if (file->dentry)
        {
            LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
        }

        dfs_aspace_unmap(file, merge_from);
        on_varea_close(merge_from);

        return RT_EOK;
    }
    else
    {
        LOG_E("%s varea %p not a file, vaddr %p", __func__, merge_from, merge_from->start);
    }

    return -RT_ERROR;
}
352
353void *on_varea_mremap(struct rt_varea *varea, rt_size_t new_size, int flags, void *new_address)
354{
355 void *vaddr = RT_NULL;
356 struct dfs_file *file = dfs_mem_obj_get_file(varea->mem_obj);
357
358#ifndef MREMAP_MAYMOVE
359#define MREMAP_MAYMOVE 1
360#endif
361
362 if (file && flags == MREMAP_MAYMOVE)
363 {
364 int ret;
365 rt_mem_obj_t mem_obj = dfs_get_mem_obj(file);
366
367 vaddr = new_address ? new_address : varea->start;
368 new_size = (new_size + ARCH_PAGE_SIZE - 1);
369 new_size &= ~ARCH_PAGE_MASK;
370 ret = rt_aspace_map(varea->aspace, &vaddr, new_size, varea->attr, varea->flag, mem_obj, varea->offset);
371 if (ret != RT_EOK)
372 {
373 LOG_E("failed to map %lx with size %lx with errno %d", vaddr, new_size, ret);
374 vaddr = RT_NULL;
375 }
376 else
377 {
378 LOG_I("old: %p size: %p new: %p size: %p", varea->start, varea->size, vaddr, new_size);
379 }
380 }
381
382 return vaddr;
383}
384
/* Template callback table for file-backed memory objects; each
 * struct dfs_mem_obj receives a copy of it in dfs_get_mem_obj(). */
static struct rt_mem_obj _mem_obj =
{
    .hint_free = hint_free,
    .on_page_fault = on_page_fault,
    .on_varea_open = on_varea_open,
    .on_varea_close = on_varea_close,
    .get_name = get_name,

    .page_read = page_read,
    .page_write = page_write,

    .on_varea_shrink = on_varea_shrink,
    .on_varea_expand = on_varea_expand,
    .on_varea_split = on_varea_split,
    .on_varea_merge = on_varea_merge,

    .on_varea_mremap = on_varea_mremap,
};
403
/* Per-file memory object: the rt_mem_obj callback table plus a back
 * pointer to the owning file, recovered via rt_container_of() in
 * dfs_mem_obj_get_file(). */
struct dfs_mem_obj {
    struct rt_mem_obj mem_obj; /* embedded callbacks handed to the aspace layer */
    void *file;                /* the struct dfs_file * this object backs */
};
408
/**
 * Return the rt_mem_obj bound to @file, creating it lazily on first use.
 *
 * The object is a copy of the _mem_obj template with a back pointer to
 * the file, and is cached in file->mmap_context for later mappings.
 *
 * NOTE(review): the check-then-allocate on mmap_context is not visibly
 * locked here — two threads mmap'ing the same file concurrently could
 * each allocate; presumably a caller-held file lock prevents this, TODO
 * confirm.
 *
 * @return the memory object, or RT_NULL when rt_malloc fails
 */
static rt_mem_obj_t dfs_get_mem_obj(struct dfs_file *file)
{
    rt_mem_obj_t mobj = file->mmap_context;
    if (!mobj)
    {
        struct dfs_mem_obj *dfs_mobj;

        dfs_mobj = rt_malloc(sizeof(*dfs_mobj));
        if (dfs_mobj)
        {
            dfs_mobj->file = file;
            mobj = &dfs_mobj->mem_obj;
            /* clone the shared callback table into this object */
            memcpy(mobj, &_mem_obj, sizeof(*mobj));
            file->mmap_context = mobj;
        }
    }
    return mobj;
}
428
429static void *dfs_mem_obj_get_file(rt_mem_obj_t mem_obj)
430{
431 struct dfs_mem_obj *dfs_mobj;
432 dfs_mobj = rt_container_of(mem_obj, struct dfs_mem_obj, mem_obj);
433 return dfs_mobj->file;
434}
435
/**
 * Implement mmap() for a DFS file: create a file-backed mapping in the
 * requesting lwp's address space.
 *
 * Requires the file's vnode to have a page-cache aspace; filesystems
 * without one cannot be mapped.
 *
 * @param file  the opened file to map
 * @param mmap2 mmap arguments; on success mmap2->ret receives the mapped
 *              address
 *
 * @return RT_EOK on success; -EINVAL when file/vnode/aspace is missing,
 *         or the error code reported by the mapping attempt
 */
int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
{
    rt_err_t ret = -EINVAL;
    void *map_vaddr;

    LOG_I("mmap2 args addr: %p length: 0x%x prot: %d flags: 0x%x pgoffset: 0x%x",
          mmap2->addr, mmap2->length, mmap2->prot, mmap2->flags, mmap2->pgoffset);
    if (file && file->vnode)
    {
        if (file->vnode->aspace)
        {
            /* create a va area in user space (lwp) */
            map_vaddr = _map_data_to_uspace(mmap2, file, &ret);
            if (map_vaddr)
            {
                mmap2->ret = map_vaddr;
                LOG_I("file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
            }
        }
        else
        {
            LOG_E("File mapping is not supported, file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
        }
    }

    return ret;
}
463#else
/**
 * Stub used when RT_USING_SMART, ARCH_MM_MMU and RT_USING_PAGECACHE are
 * not all enabled: file mapping is unavailable.
 *
 * NOTE(review): file and file->dentry are dereferenced for logging with
 * no NULL checks — a NULL file would crash before the error return;
 * presumably callers always pass a valid open file, TODO confirm.
 *
 * @return -EPERM always
 */
int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
{
    LOG_E("File mapping support is not enabled, file: %s%s", file->dentry->mnt->fullpath, file->dentry->pathname);
    LOG_E("mmap2 args addr: %p length: 0x%x prot: %d flags: 0x%x pgoffset: 0x%x",
          mmap2->addr, mmap2->length, mmap2->prot, mmap2->flags, mmap2->pgoffset);

    return -EPERM;
}
472#endif
int dfs_file_close(struct dfs_file *file)
int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2)
void dfs_file_unlock(void)
Defined at dfs.c:164
rt_err_t dfs_file_lock(void)
Defined at dfs.c:147
#define RT_ASSERT(EX)
rt_weak void * rt_malloc(rt_size_t size)
Allocate a block of memory with a minimum of 'size' bytes.
#define rt_container_of(ptr, type, member)
#define rt_atomic_sub(ptr, v)
#define rt_atomic_add(ptr, v)
#define rt_atomic_load(ptr)
#define LOG_E(fmt,...)
#define LOG_I(...)
rt_base_t rt_err_t
rt_ubase_t rt_size_t
rt_uint32_t rt_ubase_t
#define RT_NULL
struct dfs_mnt * mnt
struct dfs_vnode * vnode
struct dfs_dentry * dentry
rt_atomic_t ref_count
void * mmap_context
uint32_t flags
char * fullpath
struct dfs_aspace * aspace