RT-Thread RTOS 1.2.0
An open source embedded real-time operating system
mem.c
Go to the documentation of this file.
1/*
2 * Copyright (c) 2006-2024, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2008-7-12 Bernard the first version
9 * 2010-06-09 Bernard fix the end stub of heap
10 * fix memory check in rt_realloc function
11 * 2010-07-13 Bernard fix RT_ALIGN issue found by kuronca
12 * 2010-10-14 Bernard fix rt_realloc issue when realloc a NULL pointer.
13 * 2017-07-14 armink fix rt_realloc issue when new size is 0
14 * 2018-10-02 Bernard Add 64bit support
15 */
16
17/*
18 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
19 * All rights reserved.
20 *
21 * Redistribution and use in source and binary forms, with or without modification,
22 * are permitted provided that the following conditions are met:
23 *
24 * 1. Redistributions of source code must retain the above copyright notice,
25 * this list of conditions and the following disclaimer.
26 * 2. Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials provided with the distribution.
29 * 3. The name of the author may not be used to endorse or promote products
30 * derived from this software without specific prior written permission.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
33 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
34 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
35 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
36 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
37 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
38 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
39 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
41 * OF SUCH DAMAGE.
42 *
43 * This file is part of the lwIP TCP/IP stack.
44 *
45 * Author: Adam Dunkels <adam@sics.se>
46 * Simon Goldschmidt
47 *
48 */
49
50#include <rthw.h>
51#include <rtthread.h>
52
53#if defined (RT_USING_SMALL_MEM)
54
55#define DBG_TAG "kernel.mem"
56#define DBG_LVL DBG_INFO
57#include <rtdbg.h>
58
59struct rt_small_mem_item
60{
61 rt_uintptr_t pool_ptr;
62 rt_size_t next;
63 rt_size_t prev;
64#ifdef RT_USING_MEMTRACE
65#ifdef ARCH_CPU_64BIT
66 rt_uint8_t thread[8];
67#else
68 rt_uint8_t thread[4];
69#endif /* ARCH_CPU_64BIT */
70#endif /* RT_USING_MEMTRACE */
71};
72
76struct rt_small_mem
77{
78 struct rt_memory parent;
79 rt_uint8_t *heap_ptr;
80 struct rt_small_mem_item *heap_end;
81 struct rt_small_mem_item *lfree;
82 rt_size_t mem_size_aligned;
83};
84
85#define MIN_SIZE (sizeof(rt_uintptr_t) + sizeof(rt_size_t) + sizeof(rt_size_t))
86
87#define MEM_MASK ((~(rt_size_t)0) - 1)
88
89#define MEM_USED(_mem) ((((rt_uintptr_t)(_mem)) & MEM_MASK) | 0x1)
90#define MEM_FREED(_mem) ((((rt_uintptr_t)(_mem)) & MEM_MASK) | 0x0)
91#define MEM_ISUSED(_mem) \
92 (((rt_uintptr_t)(((struct rt_small_mem_item *)(_mem))->pool_ptr)) & (~MEM_MASK))
93#define MEM_POOL(_mem) \
94 ((struct rt_small_mem *)(((rt_uintptr_t)(((struct rt_small_mem_item *)(_mem))->pool_ptr)) & (MEM_MASK)))
95#define MEM_SIZE(_heap, _mem) \
96 (((struct rt_small_mem_item *)(_mem))->next - ((rt_uintptr_t)(_mem) - \
97 (rt_uintptr_t)((_heap)->heap_ptr)) - RT_ALIGN(sizeof(struct rt_small_mem_item), RT_ALIGN_SIZE))
98
99#define MIN_SIZE_ALIGNED RT_ALIGN(MIN_SIZE, RT_ALIGN_SIZE)
100#define SIZEOF_STRUCT_MEM RT_ALIGN(sizeof(struct rt_small_mem_item), RT_ALIGN_SIZE)
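The MEM_USED()/MEM_FREED()/MEM_ISUSED()/MEM_POOL() macros above pack two facts into the single pool_ptr word of every item: the address of the owning rt_small_mem pool (which is at least pointer-aligned, so its lowest bit is always zero) and a used/free flag kept in that lowest bit. A minimal standalone sketch of the same tagging idea, for illustration only and not part of mem.c:

#include <stdint.h>
#include <stdio.h>

struct pool { int dummy; };                         /* stands in for struct rt_small_mem */

int main(void)
{
    struct pool p;
    uintptr_t mask  = (~(uintptr_t)0) - 1;          /* same shape as MEM_MASK: every bit except bit 0 */
    uintptr_t used  = ((uintptr_t)&p & mask) | 0x1; /* like MEM_USED: pool address with bit 0 set */
    uintptr_t freed = ((uintptr_t)&p & mask) | 0x0; /* like MEM_FREED: pool address with bit 0 clear */

    printf("used flag: %lu, freed flag: %lu\n",
           (unsigned long)(used & ~mask), (unsigned long)(freed & ~mask));
    printf("pool recovered from tagged word: %p (original: %p)\n",
           (void *)(used & mask), (void *)&p);
    return 0;
}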
101
102#ifdef RT_USING_MEMTRACE
103rt_inline void rt_smem_setname(struct rt_small_mem_item *mem, const char *name)
104{
105 int index;
106 for (index = 0; index < sizeof(mem->thread); index ++)
107 {
108 if (name[index] == '\0') break;
109 mem->thread[index] = name[index];
110 }
111
112 for (; index < sizeof(mem->thread); index ++)
113 {
114 mem->thread[index] = ' ';
115 }
116}
117#endif /* RT_USING_MEMTRACE */
118
119static void plug_holes(struct rt_small_mem *m, struct rt_small_mem_item *mem)
120{
121 struct rt_small_mem_item *nmem;
122 struct rt_small_mem_item *pmem;
123
124 RT_ASSERT((rt_uint8_t *)mem >= m->heap_ptr);
125 RT_ASSERT((rt_uint8_t *)mem < (rt_uint8_t *)m->heap_end);
126
127 /* plug hole forward */
128 nmem = (struct rt_small_mem_item *)&m->heap_ptr[mem->next];
129 if (mem != nmem && !MEM_ISUSED(nmem) &&
130 (rt_uint8_t *)nmem != (rt_uint8_t *)m->heap_end)
131 {
132 /* if mem->next is unused and not end of m->heap_ptr,
133 * combine mem and mem->next
134 */
135 if (m->lfree == nmem)
136 {
137 m->lfree = mem;
138 }
139 nmem->pool_ptr = 0;
140 mem->next = nmem->next;
141 ((struct rt_small_mem_item *)&m->heap_ptr[nmem->next])->prev = (rt_uint8_t *)mem - m->heap_ptr;
142 }
143
144 /* plug hole backward */
145 pmem = (struct rt_small_mem_item *)&m->heap_ptr[mem->prev];
146 if (pmem != mem && !MEM_ISUSED(pmem))
147 {
148 /* if mem->prev is unused, combine mem and mem->prev */
149 if (m->lfree == mem)
150 {
151 m->lfree = pmem;
152 }
153 mem->pool_ptr = 0;
154 pmem->next = mem->next;
155 ((struct rt_small_mem_item *)&m->heap_ptr[mem->next])->prev = (rt_uint8_t *)pmem - m->heap_ptr;
156 }
157}
158
170rt_smem_t rt_smem_init(const char *name,
171 void *begin_addr,
172 rt_size_t size)
173{
174 struct rt_small_mem_item *mem;
175 struct rt_small_mem *small_mem;
176 rt_uintptr_t start_addr, begin_align, end_align, mem_size;
177
178 small_mem = (struct rt_small_mem *)RT_ALIGN((rt_uintptr_t)begin_addr, RT_ALIGN_SIZE);
179 start_addr = (rt_uintptr_t)small_mem + sizeof(*small_mem);
180 begin_align = RT_ALIGN((rt_uintptr_t)start_addr, RT_ALIGN_SIZE);
181 end_align = RT_ALIGN_DOWN((rt_uintptr_t)begin_addr + size, RT_ALIGN_SIZE);
182
183 /* alignment addr */
184 if ((end_align > (2 * SIZEOF_STRUCT_MEM)) &&
185 ((end_align - 2 * SIZEOF_STRUCT_MEM) >= start_addr))
186 {
187 /* calculate the aligned memory size */
188 mem_size = end_align - begin_align - 2 * SIZEOF_STRUCT_MEM;
189 }
190 else
191 {
192 rt_kprintf("mem init, error begin address 0x%x, and end address 0x%x\n",
193 (rt_uintptr_t)begin_addr, (rt_uintptr_t)begin_addr + size);
194
195 return RT_NULL;
196 }
197
198 rt_memset(small_mem, 0, sizeof(*small_mem));
199 /* initialize small memory object */
200 rt_object_init(&(small_mem->parent.parent), RT_Object_Class_Memory, name);
201 small_mem->parent.algorithm = "small";
202 small_mem->parent.address = begin_align;
203 small_mem->parent.total = mem_size;
204 small_mem->mem_size_aligned = mem_size;
205
206 /* point to begin address of heap */
207 small_mem->heap_ptr = (rt_uint8_t *)begin_align;
208
209 LOG_D("mem init, heap begin address 0x%x, size %d",
210 (rt_uintptr_t)small_mem->heap_ptr, small_mem->mem_size_aligned);
211
212 /* initialize the start of the heap */
213 mem = (struct rt_small_mem_item *)small_mem->heap_ptr;
214 mem->pool_ptr = MEM_FREED(small_mem);
215 mem->next = small_mem->mem_size_aligned + SIZEOF_STRUCT_MEM;
216 mem->prev = 0;
217#ifdef RT_USING_MEMTRACE
218 rt_smem_setname(mem, "INIT");
219#endif /* RT_USING_MEMTRACE */
220
221 /* initialize the end of the heap */
222 small_mem->heap_end = (struct rt_small_mem_item *)&small_mem->heap_ptr[mem->next];
223 small_mem->heap_end->pool_ptr = MEM_USED(small_mem);
224 small_mem->heap_end->next = small_mem->mem_size_aligned + SIZEOF_STRUCT_MEM;
225 small_mem->heap_end->prev = small_mem->mem_size_aligned + SIZEOF_STRUCT_MEM;
226#ifdef RT_USING_MEMTRACE
227 rt_smem_setname(small_mem->heap_end, "INIT");
228#endif /* RT_USING_MEMTRACE */
229
230 /* initialize the lowest-free pointer to the start of the heap */
231 small_mem->lfree = (struct rt_small_mem_item *)small_mem->heap_ptr;
232
233 return &small_mem->parent;
234}
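Usage sketch: rt_smem_init() lays an rt_small_mem header, a start item and an end item over the supplied region and returns a handle for the allocation functions below. The buffer and function names here (demo_heap_buf, small_mem_demo) are hypothetical and not part of mem.c:

/* hypothetical example, not part of mem.c */
static rt_uint8_t demo_heap_buf[4096];

static void small_mem_demo(void)
{
    rt_smem_t heap = rt_smem_init("demo", demo_heap_buf, sizeof(demo_heap_buf));

    if (heap != RT_NULL)
    {
        void *p = rt_smem_alloc(heap, 128);   /* block of at least 128 bytes */

        if (p != RT_NULL)
            rt_smem_free(p);                  /* the owning pool is recovered from pool_ptr */

        rt_smem_detach(heap);                 /* remove the heap object again */
    }
}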
235RTM_EXPORT(rt_smem_init);
236
244rt_err_t rt_smem_detach(rt_smem_t m)
245{
246    RT_ASSERT(m != RT_NULL);
247    RT_ASSERT(rt_object_get_type(&m->parent) == RT_Object_Class_Memory);
248    RT_ASSERT(rt_object_is_systemobject(&m->parent));
249
250    rt_object_detach(&(m->parent));
251
252    return RT_EOK;
253}
254RTM_EXPORT(rt_smem_detach);
255
259
261
271void *rt_smem_alloc(rt_smem_t m, rt_size_t size)
272{
273 rt_size_t ptr, ptr2;
274 struct rt_small_mem_item *mem, *mem2;
275 struct rt_small_mem *small_mem;
276
277 if (size == 0)
278 return RT_NULL;
279
280 RT_ASSERT(m != RT_NULL);
281    RT_ASSERT(rt_object_get_type(&m->parent) == RT_Object_Class_Memory);
282    RT_ASSERT(rt_object_is_systemobject(&m->parent));
283
284 small_mem = (struct rt_small_mem *)m;
285 /* alignment size */
286 size = RT_ALIGN(size, RT_ALIGN_SIZE);
287
288 /* every data block must be at least MIN_SIZE_ALIGNED long */
289 if (size < MIN_SIZE_ALIGNED)
290 size = MIN_SIZE_ALIGNED;
291
292 if (size > small_mem->mem_size_aligned)
293 {
294 LOG_D("no memory");
295
296 return RT_NULL;
297 }
298
299 for (ptr = (rt_uint8_t *)small_mem->lfree - small_mem->heap_ptr;
300 ptr <= small_mem->mem_size_aligned - size;
301 ptr = ((struct rt_small_mem_item *)&small_mem->heap_ptr[ptr])->next)
302 {
303 mem = (struct rt_small_mem_item *)&small_mem->heap_ptr[ptr];
304
305 if ((!MEM_ISUSED(mem)) && (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size)
306 {
307 /* mem is not used and at least perfect fit is possible:
308 * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
309
310            if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >=
311                (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED))
312            {
313 /* (in addition to the above, we test if another struct rt_small_mem_item (SIZEOF_STRUCT_MEM) containing
314 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
315 * -> split large block, create empty remainder,
316 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
317 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
318 * struct rt_small_mem_item would fit in but no data between mem2 and mem2->next
319 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
320 * region that couldn't hold data, but when mem->next gets freed,
321 * the 2 regions would be combined, resulting in more free memory
322 */
323 ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
324
325 /* create mem2 struct */
326 mem2 = (struct rt_small_mem_item *)&small_mem->heap_ptr[ptr2];
327 mem2->pool_ptr = MEM_FREED(small_mem);
328 mem2->next = mem->next;
329 mem2->prev = ptr;
330#ifdef RT_USING_MEMTRACE
331 rt_smem_setname(mem2, " ");
332#endif /* RT_USING_MEMTRACE */
333
334 /* and insert it between mem and mem->next */
335 mem->next = ptr2;
336
337 if (mem2->next != small_mem->mem_size_aligned + SIZEOF_STRUCT_MEM)
338 {
339 ((struct rt_small_mem_item *)&small_mem->heap_ptr[mem2->next])->prev = ptr2;
340 }
341 small_mem->parent.used += (size + SIZEOF_STRUCT_MEM);
342 if (small_mem->parent.max < small_mem->parent.used)
343 small_mem->parent.max = small_mem->parent.used;
344 }
345 else
346 {
347                /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
348                 * be used at this point: if not, we would have 2 unused structs in a row and plug_holes should have
349                 * taken care of this).
350                 * -> near fit or exact fit: do not split, no mem2 creation
351 * also can't move mem->next directly behind mem, since mem->next
352 * will always be used at this point!
353 */
354 small_mem->parent.used += mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr);
355 if (small_mem->parent.max < small_mem->parent.used)
356 small_mem->parent.max = small_mem->parent.used;
357 }
358 /* set small memory object */
359 mem->pool_ptr = MEM_USED(small_mem);
360#ifdef RT_USING_MEMTRACE
361 if (rt_thread_self())
362 rt_smem_setname(mem, rt_thread_self()->parent.name);
363 else
364 rt_smem_setname(mem, "NONE");
365#endif /* RT_USING_MEMTRACE */
366
367 if (mem == small_mem->lfree)
368 {
369 /* Find next free block after mem and update lowest free pointer */
370 while (MEM_ISUSED(small_mem->lfree) && small_mem->lfree != small_mem->heap_end)
371 small_mem->lfree = (struct rt_small_mem_item *)&small_mem->heap_ptr[small_mem->lfree->next];
372
373 RT_ASSERT(((small_mem->lfree == small_mem->heap_end) || (!MEM_ISUSED(small_mem->lfree))));
374 }
375 RT_ASSERT((rt_uintptr_t)mem + SIZEOF_STRUCT_MEM + size <= (rt_uintptr_t)small_mem->heap_end);
376 RT_ASSERT((rt_uintptr_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM) % RT_ALIGN_SIZE == 0);
377 RT_ASSERT((((rt_uintptr_t)mem) & (RT_ALIGN_SIZE - 1)) == 0);
378
379            LOG_D("allocate memory at 0x%x, size: %d",
380                  ((rt_uintptr_t)((rt_uint8_t *)mem + SIZEOF_STRUCT_MEM)),
381                  (rt_uintptr_t)(mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr)));
382
383 /* return the memory data except mem struct */
384 return (rt_uint8_t *)mem + SIZEOF_STRUCT_MEM;
385 }
386 }
387
388 return RT_NULL;
389}
390RTM_EXPORT(rt_smem_alloc);
391
403void *rt_smem_realloc(rt_smem_t m, void *rmem, rt_size_t newsize)
404{
405 rt_size_t size;
406 rt_size_t ptr, ptr2;
407 struct rt_small_mem_item *mem, *mem2;
408 struct rt_small_mem *small_mem;
409 void *nmem;
410
411 RT_ASSERT(m != RT_NULL);
412    RT_ASSERT(rt_object_get_type(&m->parent) == RT_Object_Class_Memory);
413    RT_ASSERT(rt_object_is_systemobject(&m->parent));
414
415 small_mem = (struct rt_small_mem *)m;
416 /* alignment size */
417 newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
418 if (newsize > small_mem->mem_size_aligned)
419 {
420 LOG_D("realloc: out of memory");
421
422 return RT_NULL;
423 }
424 else if (newsize == 0)
425 {
426 rt_smem_free(rmem);
427 return RT_NULL;
428 }
429
430 /* allocate a new memory block */
431 if (rmem == RT_NULL)
432 return rt_smem_alloc(&small_mem->parent, newsize);
433
434 RT_ASSERT((((rt_uintptr_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
435 RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)small_mem->heap_ptr);
436 RT_ASSERT((rt_uint8_t *)rmem < (rt_uint8_t *)small_mem->heap_end);
437
438 mem = (struct rt_small_mem_item *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);
439
440 /* current memory block size */
441 ptr = (rt_uint8_t *)mem - small_mem->heap_ptr;
442 size = mem->next - ptr - SIZEOF_STRUCT_MEM;
443 if (size == newsize)
444 {
445        /* the new size equals the old one; return the original block */
446 return rmem;
447 }
448
449 if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE < size)
450 {
451 /* split memory block */
452 small_mem->parent.used -= (size - newsize);
453
454 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
455 mem2 = (struct rt_small_mem_item *)&small_mem->heap_ptr[ptr2];
456 mem2->pool_ptr = MEM_FREED(small_mem);
457 mem2->next = mem->next;
458 mem2->prev = ptr;
459#ifdef RT_USING_MEMTRACE
460 rt_smem_setname(mem2, " ");
461#endif /* RT_USING_MEMTRACE */
462 mem->next = ptr2;
463 if (mem2->next != small_mem->mem_size_aligned + SIZEOF_STRUCT_MEM)
464 {
465 ((struct rt_small_mem_item *)&small_mem->heap_ptr[mem2->next])->prev = ptr2;
466 }
467
468 if (mem2 < small_mem->lfree)
469 {
470        /* the split struct is now the lowest */
471 small_mem->lfree = mem2;
472 }
473
474 plug_holes(small_mem, mem2);
475
476 return rmem;
477 }
478
479 /* expand memory */
480 nmem = rt_smem_alloc(&small_mem->parent, newsize);
481 if (nmem != RT_NULL) /* check memory */
482 {
483 rt_memcpy(nmem, rmem, size < newsize ? size : newsize);
484 rt_smem_free(rmem);
485 }
486
487 return nmem;
488}
489RTM_EXPORT(rt_smem_realloc);
490
497void rt_smem_free(void *rmem)
498{
499 struct rt_small_mem_item *mem;
500 struct rt_small_mem *small_mem;
501
502 if (rmem == RT_NULL)
503 return;
504
505 RT_ASSERT((((rt_uintptr_t)rmem) & (RT_ALIGN_SIZE - 1)) == 0);
506
507 /* Get the corresponding struct rt_small_mem_item ... */
508 mem = (struct rt_small_mem_item *)((rt_uint8_t *)rmem - SIZEOF_STRUCT_MEM);
509 /* ... which has to be in a used state ... */
510 small_mem = MEM_POOL(mem);
511 RT_ASSERT(small_mem != RT_NULL);
512 RT_ASSERT(MEM_ISUSED(mem));
513    RT_ASSERT(rt_object_get_type(&small_mem->parent.parent) == RT_Object_Class_Memory);
514    RT_ASSERT(rt_object_is_systemobject(&small_mem->parent.parent));
515 RT_ASSERT((rt_uint8_t *)rmem >= (rt_uint8_t *)small_mem->heap_ptr &&
516 (rt_uint8_t *)rmem < (rt_uint8_t *)small_mem->heap_end);
517 RT_ASSERT(MEM_POOL(&small_mem->heap_ptr[mem->next]) == small_mem);
518
519 LOG_D("release memory 0x%x, size: %d",
520 (rt_uintptr_t)rmem,
521 (rt_uintptr_t)(mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr)));
522
523 /* ... and is now unused. */
524 mem->pool_ptr = MEM_FREED(small_mem);
525#ifdef RT_USING_MEMTRACE
526 rt_smem_setname(mem, " ");
527#endif /* RT_USING_MEMTRACE */
528
529 if (mem < small_mem->lfree)
530 {
531 /* the newly freed struct is now the lowest */
532 small_mem->lfree = mem;
533 }
534
535 small_mem->parent.used -= (mem->next - ((rt_uint8_t *)mem - small_mem->heap_ptr));
536
537 /* finally, see if prev or next are free also */
538 plug_holes(small_mem, mem);
539}
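A short sketch of the realloc path, reusing the hypothetical heap handle from the earlier example: shrinking by more than SIZEOF_STRUCT_MEM + MIN_SIZE splits the block in place, while growing falls back to rt_smem_alloc() plus rt_memcpy() plus rt_smem_free() of the old block, so the old pointer must not be used after a successful grow.

/* hypothetical example, not part of mem.c */
static void small_mem_realloc_demo(rt_smem_t heap)
{
    void *buf = rt_smem_alloc(heap, 64);

    if (buf != RT_NULL)
    {
        void *bigger = rt_smem_realloc(heap, buf, 256);  /* may move the data to a new block */

        if (bigger != RT_NULL)
            buf = bigger;                                /* the old pointer is stale after a move */

        rt_smem_free(buf);
    }
}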
540RTM_EXPORT(rt_smem_free);
541
542#ifdef RT_USING_FINSH
543#include <finsh.h>
544
545#ifdef RT_USING_MEMTRACE
546static int memcheck(int argc, char *argv[])
547{
548 int position;
549 rt_base_t level;
550 struct rt_small_mem_item *mem;
551 struct rt_small_mem *m;
552 struct rt_object_information *information;
553 struct rt_list_node *node;
554 struct rt_object *object;
555 char *name;
556
557 name = argc > 1 ? argv[1] : RT_NULL;
558 level = rt_hw_interrupt_disable();
559 /* get mem object */
560    information = rt_object_get_information(RT_Object_Class_Memory);
561    for (node = information->object_list.next;
562 node != &(information->object_list);
563 node = node->next)
564 {
565 object = rt_list_entry(node, struct rt_object, list);
566 /* find the specified object */
567 if (name != RT_NULL && rt_strncmp(name, object->name, RT_NAME_MAX) != 0)
568 {
569 continue;
570 }
571 /* mem object */
572 m = (struct rt_small_mem *)object;
573 if(rt_strncmp(m->parent.algorithm, "small", RT_NAME_MAX) != 0)
574 {
575 continue;
576 }
577
578 /* check mem */
579 for (mem = (struct rt_small_mem_item *)m->heap_ptr; mem != m->heap_end; mem = (struct rt_small_mem_item *)&m->heap_ptr[mem->next])
580 {
581 position = (rt_uintptr_t)mem - (rt_uintptr_t)m->heap_ptr;
582 if (position < 0) goto __exit;
583 if (position > (int)m->mem_size_aligned) goto __exit;
584 if (MEM_POOL(mem) != m) goto __exit;
585 }
586 }
587    rt_hw_interrupt_enable(level);
588
589 return 0;
590__exit:
591 rt_kprintf("Memory block wrong:\n");
592 rt_kprintf(" name: %s\n", m->parent.parent.name);
593 rt_kprintf("address: 0x%08x\n", mem);
594 rt_kprintf(" pool: 0x%04x\n", mem->pool_ptr);
595 rt_kprintf(" size: %d\n", mem->next - position - SIZEOF_STRUCT_MEM);
596    rt_hw_interrupt_enable(level);
597
598 return 0;
599}
600MSH_CMD_EXPORT(memcheck, check memory data);
601
602static int memtrace(int argc, char **argv)
603{
604 struct rt_small_mem_item *mem;
605 struct rt_small_mem *m;
606 struct rt_object_information *information;
607 struct rt_list_node *node;
608 struct rt_object *object;
609 char *name;
610
611 name = argc > 1 ? argv[1] : RT_NULL;
612 /* get mem object */
613    information = rt_object_get_information(RT_Object_Class_Memory);
614    for (node = information->object_list.next;
615 node != &(information->object_list);
616 node = node->next)
617 {
618 object = rt_list_entry(node, struct rt_object, list);
619 /* find the specified object */
620 if (name != RT_NULL && rt_strncmp(name, object->name, RT_NAME_MAX) != 0)
621 {
622 continue;
623 }
624 /* mem object */
625 m = (struct rt_small_mem *)object;
626 if(rt_strncmp(m->parent.algorithm, "small", RT_NAME_MAX) != 0)
627 {
628 continue;
629 }
630 /* show memory information */
631 rt_kprintf("\nmemory heap address:\n");
632 rt_kprintf("name : %s\n", m->parent.parent.name);
633 rt_kprintf("total : %d\n", m->parent.total);
634 rt_kprintf("used : %d\n", m->parent.used);
635 rt_kprintf("max_used: %d\n", m->parent.max);
636 rt_kprintf("heap_ptr: 0x%08x\n", m->heap_ptr);
637 rt_kprintf("lfree : 0x%08x\n", m->lfree);
638 rt_kprintf("heap_end: 0x%08x\n", m->heap_end);
639 rt_kprintf("\n--memory item information --\n");
640 for (mem = (struct rt_small_mem_item *)m->heap_ptr; mem != m->heap_end; mem = (struct rt_small_mem_item *)&m->heap_ptr[mem->next])
641 {
642 int size = MEM_SIZE(m, mem);
643
644 rt_kprintf("[0x%08x - ", mem);
645 if (size < 1024)
646 rt_kprintf("%5d", size);
647 else if (size < 1024 * 1024)
648 rt_kprintf("%4dK", size / 1024);
649 else
650 rt_kprintf("%4dM", size / (1024 * 1024));
651
652 rt_kprintf("] %c%c%c%c", mem->thread[0], mem->thread[1], mem->thread[2], mem->thread[3]);
653 if (MEM_POOL(mem) != m)
654 rt_kprintf(": ***\n");
655 else
656 rt_kprintf("\n");
657 }
658 }
659 return 0;
660}
661MSH_CMD_EXPORT(memtrace, dump memory trace information);
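With RT_USING_FINSH and RT_USING_MEMTRACE both enabled, the two commands above can be run from the msh shell. "memcheck" walks every item of each small-mem heap and reports the first corrupted block it finds; "memtrace" dumps the heap header fields and the per-item list. Both take an optional object name (the name "heap1" below is hypothetical) to restrict the output to one heap:

msh > memcheck
msh > memtrace
msh > memtrace heap1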
662#endif /* RT_USING_MEMTRACE */
663#endif /* RT_USING_FINSH */
664
665#endif /* defined (RT_USING_SMALL_MEM) */
666