RT-Thread RTOS 1.2.0
An open source embedded real-time operating system
dfs_mnt.c
Go to the documentation of this file.
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-05-05     Bernard      Implement mnt in dfs v2.0
 */

#include <rtthread.h>

#include "dfs_private.h"

#include <dfs.h>
#include <dfs_dentry.h>
#include <dfs_mnt.h>
#include <dfs_pcache.h>

#define DBG_TAG    "DFS.mnt"
#define DBG_LVL    DBG_WARNING
#include <rtdbg.h>

static struct dfs_mnt *_root_mnt = RT_NULL;

RT_OBJECT_HOOKLIST_DEFINE(dfs_mnt_umnt);

/*
 * mnt tree structure
 *
 * mnt_root <----------------------------------------+
 *    | (child)        +----------+                  |
 *    v (sibling)      v          |                  |
 *   mnt_child0 -> mnt_child1     |                  |
 *                    | (child)   |                  |
 *                    v           / (parent)         | (root)
 *                mnt_child10 ---/
 *
 */
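/*
 * Editor's illustration (not part of the original source): the tree above is
 * normally built by dfs_mount(), but in terms of this file's API it amounts to:
 *
 *     struct dfs_mnt *root  = dfs_mnt_create("/");
 *     dfs_mnt_insert(RT_NULL, root);        root becomes _root_mnt
 *
 *     struct dfs_mnt *child = dfs_mnt_create("/mnt/sd0");   hypothetical path
 *     dfs_mnt_insert(root, child);          child is linked into root's child
 *                                           list and references root as parent
 */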

struct dfs_mnt *dfs_mnt_create(const char *path)
{
    struct dfs_mnt *mnt = rt_calloc(1, sizeof(struct dfs_mnt));
    if (mnt)
    {
        LOG_I("create mnt at %s", path);

        mnt->fullpath = rt_strdup(path);
        rt_list_init(&mnt->sibling);
        rt_list_init(&mnt->child);
        mnt->flags |= MNT_IS_ALLOCED;
        rt_atomic_store(&(mnt->ref_count), 1);
    }
    else
    {
        rt_set_errno(-ENOMEM);
    }

    return mnt;
}

int dfs_mnt_insert(struct dfs_mnt* mnt, struct dfs_mnt* child)
{
    if (child)
    {
        if (mnt == RT_NULL)
        {
            /* insert into root */
            mnt = dfs_mnt_lookup(child->fullpath);
            if (mnt == RT_NULL || (strcmp(child->fullpath, "/") == 0))
            {
                /* it's root mnt */
                mnt = child;
                mnt->flags |= MNT_IS_LOCKED;

                /* ref to global root */
                if (_root_mnt)
                {
                    child = _root_mnt;
                    rt_atomic_sub(&(_root_mnt->parent->ref_count), 1);
                    rt_atomic_sub(&(_root_mnt->ref_count), 1);
                    _root_mnt->flags &= ~MNT_IS_LOCKED;

                    _root_mnt = dfs_mnt_ref(mnt);
                    mnt->parent = dfs_mnt_ref(mnt);
                    mnt->flags |= MNT_IS_ADDLIST;

                    mkdir("/dev", 0777);
                }
                else
                {
                    _root_mnt = dfs_mnt_ref(mnt);
                }
            }
        }

        if (mnt)
        {
            child->flags |= MNT_IS_ADDLIST;
            if (child != mnt)
            {
                /* not the root, insert into the child list */
                rt_list_insert_before(&mnt->child, &child->sibling);
                /* child ref self */
                dfs_mnt_ref(child);
            }
            /* parent ref parent */
            child->parent = dfs_mnt_ref(mnt);
        }
    }

    return 0;
}

/* remove mnt from mnt_tree */
int dfs_mnt_remove(struct dfs_mnt* mnt)
{
    int ret = -RT_ERROR;

    if (rt_list_isempty(&mnt->child))
    {
        rt_list_remove(&mnt->sibling);
        if (mnt->parent)
        {
            /* parent unref parent */
            rt_atomic_sub(&(mnt->parent->ref_count), 1);
        }

        ret = RT_EOK;
    }
    else
    {
        LOG_W("remove a mnt point:%s with child.", mnt->fullpath);
    }

    return ret;
}

static struct dfs_mnt *_dfs_mnt_dev_lookup(struct dfs_mnt *mnt, rt_device_t dev_id)
{
    struct dfs_mnt *ret = RT_NULL, *iter = RT_NULL;

    rt_list_for_each_entry(iter, &mnt->child, sibling)
    {
        if (iter->dev_id == dev_id)
        {
            ret = iter;
            break;
        }
        else
        {
            ret = _dfs_mnt_dev_lookup(iter, dev_id);
            if (ret)
            {
                break;
            }
        }
    }

    return ret;
}

struct dfs_mnt *dfs_mnt_dev_lookup(rt_device_t dev_id)
{
    struct dfs_mnt *mnt = _root_mnt;
    struct dfs_mnt *ret = RT_NULL;

    if (mnt)
    {
        dfs_lock();

        if (mnt->dev_id == dev_id)
        {
            dfs_unlock();
            return mnt;
        }

        ret = _dfs_mnt_dev_lookup(mnt, dev_id);

        dfs_unlock();
    }

    return ret;
}

struct dfs_mnt *dfs_mnt_lookup(const char *fullpath)
{
    struct dfs_mnt *mnt = _root_mnt;
    struct dfs_mnt *iter = RT_NULL;

    if (mnt)
    {
        int mnt_len = rt_strlen(mnt->fullpath);

        dfs_lock();
        if ((strncmp(mnt->fullpath, fullpath, mnt_len) == 0) &&
            (mnt_len == 1 || (fullpath[mnt_len] == '\0') || (fullpath[mnt_len] == '/')))
        {
            while (!rt_list_isempty(&mnt->child))
            {
                rt_list_for_each_entry(iter, &mnt->child, sibling)
                {
                    mnt_len = rt_strlen(iter->fullpath);
                    if ((strncmp(iter->fullpath, fullpath, mnt_len) == 0) &&
                        ((fullpath[mnt_len] == '\0') || (fullpath[mnt_len] == '/')))
                    {
                        mnt = iter;
                        break;
                    }
                }

                if (mnt != iter) break;
            }
        }
        else
        {
            mnt = RT_NULL;
        }
        dfs_unlock();

        if (mnt)
        {
            LOG_D("mnt_lookup: %s path @ mount point %p", fullpath, mnt);
            DLOG(note, "mnt", "found mnt(%s)", mnt->fs_ops->name);
        }
    }

    return mnt;
}

struct dfs_mnt* dfs_mnt_ref(struct dfs_mnt* mnt)
{
    if (mnt)
    {
        rt_atomic_add(&(mnt->ref_count), 1);
        DLOG(note, "mnt", "mnt(%s),ref_count=%d", mnt->fs_ops->name, rt_atomic_load(&(mnt->ref_count)));
    }

    return mnt;
}

int dfs_mnt_unref(struct dfs_mnt *mnt)
{
    rt_err_t ret = RT_EOK;
    rt_base_t ref_count;

    if (mnt)
    {
        ref_count = rt_atomic_sub(&(mnt->ref_count), 1) - 1;

        if (ref_count == 0)
        {
            dfs_lock();

            if (mnt->flags & MNT_IS_UMOUNT)
            {
                mnt->fs_ops->umount(mnt);

                RT_OBJECT_HOOKLIST_CALL(dfs_mnt_umnt, (mnt));
            }

            /* free full path */
            rt_free(mnt->fullpath);
            mnt->fullpath = RT_NULL;

            /* destroy self and the ref_count should be 0 */
            DLOG(msg, "mnt", "mnt", DLOG_MSG, "free mnt(%s)", mnt->fs_ops->name);
            rt_free(mnt);

            dfs_unlock();
        }
        else
        {
            DLOG(note, "mnt", "mnt(%s),ref_count=%d", mnt->fs_ops->name, rt_atomic_load(&(mnt->ref_count)));
        }
    }

    return ret;
}
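
/*
 * Editor's note on the reference counting above (a sketch, not from the
 * original source): dfs_mnt_create() starts a mnt with ref_count == 1;
 * dfs_mnt_insert() additionally references the child for its place in the
 * parent's child list and the parent for the child's parent pointer; other
 * DFS objects (dentries, open files) are expected to hold references via
 * dfs_mnt_ref()/dfs_mnt_unref(). dfs_mnt_destroy() drops the creation
 * reference, and the mnt is only freed here in dfs_mnt_unref() when the
 * count reaches zero, at which point fs_ops->umount() runs if MNT_IS_UMOUNT
 * is set.
 */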

int dfs_mnt_setflags(struct dfs_mnt *mnt, int flags)
{
    int error = 0;

    if (flags & MS_RDONLY)
    {
        mnt->flags |= MNT_RDONLY;
#ifdef RT_USING_PAGECACHE
        dfs_pcache_clean(mnt);
#endif
    }

    return error;
}

int dfs_mnt_destroy(struct dfs_mnt* mnt)
{
    rt_err_t ret = RT_EOK;

    if (mnt)
    {
        if (mnt->flags & MNT_IS_MOUNTED)
        {
            mnt->flags &= ~MNT_IS_MOUNTED;
            mnt->flags |= MNT_IS_UMOUNT;
            /* remove it from mnt list */
            if (mnt->flags & MNT_IS_ADDLIST)
            {
                dfs_mnt_remove(mnt);
            }
        }

        dfs_mnt_unref(mnt);
    }

    return ret;
}

static struct dfs_mnt* _dfs_mnt_foreach(struct dfs_mnt *mnt, struct dfs_mnt* (*func)(struct dfs_mnt *mnt, void *parameter), void *parameter)
{
    struct dfs_mnt *iter, *ret = NULL;

    if (mnt)
    {
        ret = func(mnt, parameter);
        if (ret == RT_NULL)
        {
            if (!rt_list_isempty(&mnt->child))
            {
                /* for each in mount point list */
                rt_list_for_each_entry(iter, &mnt->child, sibling)
                {
                    ret = _dfs_mnt_foreach(iter, func, parameter);
                    if (ret != RT_NULL)
                    {
                        break;
                    }
                }
            }
        }
    }
    else
    {
        ret = RT_NULL;
    }

    return ret;
}

static struct dfs_mnt* _mnt_cmp_devid(struct dfs_mnt *mnt, void *device)
{
    struct dfs_mnt *ret = RT_NULL;
    struct rt_device *dev = (struct rt_device*)device;

    if (dev && mnt)
    {
        if (mnt->dev_id == dev)
        {
            ret = mnt;
        }
    }

    return ret;
}

const char *dfs_mnt_get_mounted_path(struct rt_device *device)
{
    const char* path = RT_NULL;

    if (_root_mnt)
    {
        struct dfs_mnt* mnt;

        dfs_lock();
        mnt = _dfs_mnt_foreach(_root_mnt, _mnt_cmp_devid, device);
        dfs_unlock();

        if (mnt) path = mnt->fullpath;
    }

    return path;
}

static struct dfs_mnt* _mnt_dump(struct dfs_mnt *mnt, void *parameter)
{
    if (mnt)
    {
        if (mnt->dev_id)
        {
            rt_kprintf("%-10s %-6s %-10s %d\n",
                       mnt->fs_ops->name, mnt->dev_id->parent.name, mnt->fullpath, rt_atomic_load(&(mnt->ref_count)));
        }
        else
        {
            rt_kprintf("%-10s (NULL) %-10s %d\n",
                       mnt->fs_ops->name, mnt->fullpath, rt_atomic_load(&(mnt->ref_count)));
        }
    }

    return RT_NULL;
}

static struct dfs_mnt* _mnt_cmp_path(struct dfs_mnt* mnt, void *parameter)
{
    const char* fullpath = (const char*)parameter;
    struct dfs_mnt *ret = RT_NULL;

    if (strncmp(mnt->fullpath, fullpath, rt_strlen(fullpath)) == 0)
    {
        ret = mnt;
    }

    return ret;
}

rt_bool_t dfs_mnt_has_child_mnt(struct dfs_mnt *mnt, const char *fullpath)
{
    int ret = RT_FALSE;

    if (mnt && fullpath)
    {
        struct dfs_mnt *m = RT_NULL;

        dfs_lock();
        m = _dfs_mnt_foreach(mnt, _mnt_cmp_path, (void*)fullpath);
        dfs_unlock();

        if (m)
        {
            ret = RT_TRUE;
        }
    }

    return ret;
}

int dfs_mnt_list(struct dfs_mnt *mnt)
{
    if (!mnt) mnt = _root_mnt;

    /* lock file system */
    dfs_lock();
    _dfs_mnt_foreach(mnt, _mnt_dump, RT_NULL);
    /* unlock file system */
    dfs_unlock();

    return 0;
}

int dfs_mnt_foreach(struct dfs_mnt* (*func)(struct dfs_mnt *mnt, void *parameter), void *parameter)
{
    /* lock file system */
    dfs_lock();
    _dfs_mnt_foreach(_root_mnt, func, parameter);
    /* unlock file system */
    dfs_unlock();

    return 0;
}
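
A minimal usage sketch (editor's addition, not part of dfs_mnt.c): it assumes an RT-Thread target with DFS v2 enabled and at least one file system already mounted. The helper names find_rdonly and mnt_demo and the path "/mnt/sd0" are hypothetical; the sketch shows the dfs_mnt_foreach() callback contract, where returning a non-NULL mount point stops the walk, and how dfs_mnt_lookup() and dfs_mnt_list() are typically called.

#include <rtthread.h>
#include <dfs_mnt.h>

/* callback for dfs_mnt_foreach(): report the first read-only mount point
 * through 'parameter'; returning non-NULL stops the traversal */
static struct dfs_mnt *find_rdonly(struct dfs_mnt *mnt, void *parameter)
{
    if (mnt->flags & MNT_RDONLY)
    {
        *(struct dfs_mnt **)parameter = mnt;
        return mnt;
    }

    return RT_NULL;
}

static void mnt_demo(void)
{
    struct dfs_mnt *mnt, *rdonly = RT_NULL;

    /* dump the mount table: fs name, device name, mount path, ref_count */
    dfs_mnt_list(RT_NULL);

    /* find the mount point that covers a path ("/mnt/sd0" is only an example) */
    mnt = dfs_mnt_lookup("/mnt/sd0");
    if (mnt)
    {
        rt_kprintf("served by %s mounted at %s\n", mnt->fs_ops->name, mnt->fullpath);
    }

    /* walk the whole mnt tree with a caller-supplied predicate */
    dfs_mnt_foreach(find_rdonly, &rdonly);
    if (rdonly)
    {
        rt_kprintf("read-only mount point: %s\n", rdonly->fullpath);
    }
}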