RT-Thread RTOS 1.2.0
An open source embedded real-time operating system
slab.c
Go to the documentation of this file.
1/*
2 * Copyright (c) 2006-2021, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7/*
8 * File : slab.c
9 *
10 * Change Logs:
11 * Date Author Notes
12 * 2008-07-12 Bernard the first version
13 * 2010-07-13 Bernard fix RT_ALIGN issue found by kuronca
14 * 2010-10-23 yi.qiu add module memory allocator
15 * 2010-12-18 yi.qiu fix zone release bug
16 */
17
18/*
19 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
20 *
21 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
22 *
23 * This code is derived from software contributed to The DragonFly Project
24 * by Matthew Dillon <dillon@backplane.com>
25 *
26 * Redistribution and use in source and binary forms, with or without
27 * modification, are permitted provided that the following conditions
28 * are met:
29 *
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in
34 * the documentation and/or other materials provided with the
35 * distribution.
36 * 3. Neither the name of The DragonFly Project nor the names of its
37 * contributors may be used to endorse or promote products derived
38 * from this software without specific, prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
43 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
44 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
45 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
46 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
48 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
49 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
50 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 *
53 */
54
55#include <rthw.h>
56#include <rtthread.h>
57
58#ifdef RT_USING_SLAB
59
60#define DBG_TAG "kernel.slab"
61#define DBG_LVL DBG_INFO
62#include <rtdbg.h>
63
64/*
65 * slab allocator implementation
66 *
67 * A slab allocator reserves a ZONE for each chunk size, then lays the
68 * chunks out in an array within the zone. Allocation and deallocation
69 * are nearly instantaneous, and fragmentation/overhead losses are limited
70 * to a fixed worst-case amount.
71 *
72 * The downside of this slab implementation is in the chunk size
73 * multiplied by the number of zones. ~80 zones * 128K = 10MB of VM per cpu.
74 * In a kernel implementation all this memory will be physical so
75 * the zone size is adjusted downward on machines with less physical
76 * memory. The upside is that overhead is bounded... this is the *worst*
77 * case overhead.
78 *
79 * Slab management is done on a per-cpu basis and no locking or mutexes
80 * are required, only a critical section. When one cpu frees memory
81 * belonging to another cpu's slab manager an asynchronous IPI message
82 * will be queued to execute the operation. In addition, both the
83 * high level slab allocator and the low level zone allocator optimize
84 * M_ZERO requests, and the slab allocator does not have to pre-initialize
85 * the linked list of chunks.
86 *
87 * XXX Balancing is needed between cpus. Balance will be handled through
88 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
89 *
90 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
91 * the new zone should be restricted to M_USE_RESERVE requests only.
92 *
93 * Alloc Size Chunking Number of zones
94 * 0-127 8 16
95 * 128-255 16 8
96 * 256-511 32 8
97 * 512-1023 64 8
98 * 1024-2047 128 8
99 * 2048-4095 256 8
100 * 4096-8191 512 8
101 * 8192-16383 1024 8
102 * 16384-32767 2048 8
103 * (if RT_MM_PAGE_SIZE is 4K the maximum zone allocation is 16383)
104 *
105 * Allocations >= zone_limit go directly to kmem.
106 *
107 * API REQUIREMENTS AND SIDE EFFECTS
108 *
109 * To operate as a drop-in replacement for the FreeBSD-4.x malloc() we
110 * have remained compatible with the following API requirements:
111 *
112 * + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
113 * + all power-of-2 sized allocations are power-of-2 aligned (twe)
114 * + malloc(0) is allowed and returns non-RT_NULL (ahc driver)
115 * + ability to allocate arbitrarily large chunks of memory
116 */
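The table above fixes both the rounded request size and the worst-case internal fragmentation. As a rough illustration (an editor's sketch, not part of slab.c; the helper name slab_round_request is hypothetical), the rounding a caller effectively gets for zone-managed sizes is:

/* Editor's sketch: mirrors the chunking table above for zone-managed sizes. */
static rt_size_t slab_round_request(rt_size_t n)
{
    if (n < 128)   return (n + 7)    & ~(rt_size_t)7;     /* 8-byte chunking */
    if (n < 256)   return (n + 15)   & ~(rt_size_t)15;    /* 16-byte chunking */
    if (n < 512)   return (n + 31)   & ~(rt_size_t)31;    /* 32-byte chunking */
    if (n < 1024)  return (n + 63)   & ~(rt_size_t)63;    /* 64-byte chunking */
    if (n < 2048)  return (n + 127)  & ~(rt_size_t)127;   /* 128-byte chunking */
    if (n < 4096)  return (n + 255)  & ~(rt_size_t)255;   /* 256-byte chunking */
    if (n < 8192)  return (n + 511)  & ~(rt_size_t)511;   /* 512-byte chunking */
    if (n < 16384) return (n + 1023) & ~(rt_size_t)1023;  /* 1024-byte chunking */
    return n;  /* at or above the zone limit: handled by the page allocator */
}

For example, 100 rounds to 104, 200 to 208 and 5000 to 5120, so the waste per allocation is bounded by one chunking step.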
117
118#define ZALLOC_SLAB_MAGIC 0x51ab51ab
119#define ZALLOC_ZONE_LIMIT (16 * 1024) /* max slab-managed alloc */
120#define ZALLOC_MIN_ZONE_SIZE (32 * 1024) /* minimum zone size */
121#define ZALLOC_MAX_ZONE_SIZE (128 * 1024) /* maximum zone size */
122#define ZONE_RELEASE_THRESH 2 /* threshold number of zones */
123
124/*
125 * Misc constants. Note that allocations that are exact multiples of
126 * RT_MM_PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
127 */
128#define MIN_CHUNK_SIZE 8 /* in bytes */
129#define MIN_CHUNK_MASK (MIN_CHUNK_SIZE - 1)
130
131/*
132 * Array of descriptors that describe the contents of each page
133 */
134#define PAGE_TYPE_FREE 0x00
135#define PAGE_TYPE_SMALL 0x01
136#define PAGE_TYPE_LARGE 0x02
137
138#define btokup(addr) \
139 (&slab->memusage[((rt_uintptr_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS])
140
144
145/*
146 * The IN-BAND zone header is placed at the beginning of each zone.
147 */
148struct rt_slab_zone
149{
150 rt_uint32_t z_magic; /* magic number for sanity check */
151 rt_uint32_t z_nfree; /* total free chunks / unallocated space in zone */
152 rt_uint32_t z_nmax; /* maximum free chunks */
153 struct rt_slab_zone *z_next; /* zone_array[] link if z_nfree is non-zero */
154 rt_uint8_t *z_baseptr; /* pointer to start of chunk array */
155
156 rt_uint32_t z_uindex; /* current initial allocation index */
157 rt_uint32_t z_chunksize; /* chunk size for validation */
158
159 rt_uint32_t z_zoneindex; /* zone index */
160 struct rt_slab_chunk *z_freechunk; /* free chunk list */
161};
162
163/*
164 * Chunk structure for free elements
165 */
166struct rt_slab_chunk
167{
168 struct rt_slab_chunk *c_next;
169};
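A freed chunk stores the free-list link inside the freed memory itself, which is why MIN_CHUNK_SIZE must be at least the size of a pointer. A minimal sketch of the push/pop pattern that rt_slab_alloc() and rt_slab_free() use below (the names chunk_push and chunk_pop are illustrative only):

/* Editor's sketch of the intrusive free chunk list; not part of slab.c. */
rt_inline void chunk_push(struct rt_slab_chunk **head, void *mem)
{
    struct rt_slab_chunk *c = (struct rt_slab_chunk *)mem;

    c->c_next = *head;   /* the link lives in the freed memory itself */
    *head = c;
}

rt_inline void *chunk_pop(struct rt_slab_chunk **head)
{
    struct rt_slab_chunk *c = *head;

    if (c != RT_NULL)
        *head = c->c_next;
    return c;
}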
170
171struct rt_slab_memusage
172{
173 rt_uint32_t type: 2 ; /* page type: PAGE_TYPE_FREE/SMALL/LARGE */
174 rt_uint32_t size: 30; /* pages in the allocation (large) or page offset back to the zone header (small) */
175};
176
177/*
178 * slab page allocator
179 */
180struct rt_slab_page
181{
182 struct rt_slab_page *next;
183 rt_size_t page;
184
185 /* dummy */
186 char dummy[RT_MM_PAGE_SIZE - (sizeof(struct rt_slab_page *) + sizeof(rt_size_t))];
187};
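The dummy member pads the header so that sizeof(struct rt_slab_page) is exactly RT_MM_PAGE_SIZE, which lets a run of free pages be described by a header written in place into its first page. A compile-time check one could add here (editor's sketch, assuming a C11 toolchain):

/* Editor's sketch, assuming C11 _Static_assert is available. */
_Static_assert(sizeof(struct rt_slab_page) == RT_MM_PAGE_SIZE,
               "rt_slab_page header must occupy exactly one page");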
188
189#define RT_SLAB_NZONES 72 /* number of zones */
190
191/*
192 * slab object
193 */
194struct rt_slab
195{
196 struct rt_memory parent; /* inherit from rt_memory */
197 rt_uintptr_t heap_start; /* memory start address */
198 rt_uintptr_t heap_end; /* memory end address */
199 struct rt_slab_memusage *memusage; /* one descriptor per page */
200 struct rt_slab_zone *zone_array[RT_SLAB_NZONES]; /* linked list of zones NFree > 0 */
201 struct rt_slab_zone *zone_free; /* whole zones that have become free */
202 rt_uint32_t zone_free_cnt; /* number of zones on the zone_free list */
203 rt_uint32_t zone_size; /* size of a zone in bytes */
204 rt_uint32_t zone_limit; /* requests >= this go to the page allocator */
205 rt_uint32_t zone_page_cnt; /* pages per zone */
206 struct rt_slab_page *page_list; /* free page list */
207};
208
216void *rt_slab_page_alloc(rt_slab_t m, rt_size_t npages)
217{
218 struct rt_slab_page *b, *n;
219 struct rt_slab_page **prev;
220 struct rt_slab *slab = (struct rt_slab *)m;
221
222 if (npages == 0)
223 return RT_NULL;
224
225 for (prev = &slab->page_list; (b = *prev) != RT_NULL; prev = &(b->next))
226 {
227 if (b->page > npages)
228 {
229 /* split the block: hand out the first npages, keep the tail on the free list */
230 n = b + npages;
231 n->next = b->next;
232 n->page = b->page - npages;
233 *prev = n;
234 break;
235 }
236
237 if (b->page == npages)
238 {
239 /* this node fits exactly, remove it from the free list */
240 *prev = b->next;
241 break;
242 }
243 }
244
245 return b;
246}
247
257void rt_slab_page_free(rt_slab_t m, void *addr, rt_size_t npages)
258{
259 struct rt_slab_page *b, *n;
260 struct rt_slab_page **prev;
261 struct rt_slab *slab = (struct rt_slab *)m;
262
263 RT_ASSERT(addr != RT_NULL);
265 RT_ASSERT(npages != 0);
266
267 n = (struct rt_slab_page *)addr;
268
269 for (prev = &slab->page_list; (b = *prev) != RT_NULL; prev = &(b->next))
270 {
271 RT_ASSERT(b->page > 0);
272 RT_ASSERT(b > n || b + b->page <= n);
273
274 if (b + b->page == n)
275 {
276 if (b + (b->page += npages) == b->next)
277 {
278 b->page += b->next->page;
279 b->next = b->next->next;
280 }
281 return;
282 }
283
284 if (b == n + npages)
285 {
286 n->page = b->page + npages;
287 n->next = b->next;
288 *prev = n;
289 return;
290 }
291
292 if (b > n + npages)
293 break;
294 }
295
296 n->page = npages;
297 n->next = b;
298 *prev = n;
299}
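Together, rt_slab_page_alloc() and rt_slab_page_free() form a first-fit allocator over an address-ordered free list of page runs: allocation splits a run and returns its front, and freeing coalesces with both neighbours. They are mostly used internally by the zone code, but the pairing looks like this (editor's sketch; `heap` is assumed to be a handle returned by rt_slab_init() below):

/* Editor's sketch of the page-level API; not part of slab.c. */
static void page_api_demo(rt_slab_t heap)
{
    void *pages;

    pages = rt_slab_page_alloc(heap, 4);     /* take a run of 4 pages */
    if (pages != RT_NULL)
    {
        /* ... use the 4 * RT_MM_PAGE_SIZE bytes ... */
        rt_slab_page_free(heap, pages, 4);   /* give back the same run length */
    }
}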
300
301/*
302 * Initialize the page allocator
303 */
304static void rt_slab_page_init(struct rt_slab *slab, void *addr, rt_size_t npages)
305{
306 RT_ASSERT(addr != RT_NULL);
307 RT_ASSERT(npages != 0);
308
309 slab->page_list = RT_NULL;
310 rt_slab_page_free((rt_slab_t)(&slab->parent), addr, npages);
311}
312
324rt_slab_t rt_slab_init(const char *name, void *begin_addr, rt_size_t size)
325{
326 rt_uint32_t limsize, npages;
327 rt_uintptr_t start_addr, begin_align, end_align;
328 struct rt_slab *slab;
329
330 slab = (struct rt_slab *)RT_ALIGN((rt_uintptr_t)begin_addr, RT_ALIGN_SIZE);
331 start_addr = (rt_uintptr_t)slab + sizeof(*slab);
332 /* align begin and end addr to page */
333 begin_align = RT_ALIGN((rt_uintptr_t)start_addr, RT_MM_PAGE_SIZE);
334 end_align = RT_ALIGN_DOWN((rt_uintptr_t)begin_addr + size, RT_MM_PAGE_SIZE);
335 if (begin_align >= end_align)
336 {
337 rt_kprintf("slab init error, wrong address [0x%x - 0x%x]\n",
338 (rt_uintptr_t)begin_addr, (rt_uintptr_t)begin_addr + size);
339 return RT_NULL;
340 }
341
342 limsize = end_align - begin_align;
343 npages = limsize / RT_MM_PAGE_SIZE;
344 LOG_D("heap[0x%x - 0x%x], size 0x%x, 0x%x pages",
345 begin_align, end_align, limsize, npages);
346
347 rt_memset(slab, 0, sizeof(*slab));
348 /* initialize slab memory object */
349 rt_object_init(&(slab->parent.parent), RT_Object_Class_Memory, name);
350 slab->parent.algorithm = "slab";
351 slab->parent.address = begin_align;
352 slab->parent.total = limsize;
353 slab->parent.used = 0;
354 slab->parent.max = 0;
355 slab->heap_start = begin_align;
356 slab->heap_end = end_align;
357
358 /* init pages */
359 rt_slab_page_init(slab, (void *)slab->heap_start, npages);
360
361 /* calculate zone size */
362 slab->zone_size = ZALLOC_MIN_ZONE_SIZE;
363 while (slab->zone_size < ZALLOC_MAX_ZONE_SIZE && (slab->zone_size << 1) < (limsize / 1024))
364 slab->zone_size <<= 1;
365
366 slab->zone_limit = slab->zone_size / 4;
367 if (slab->zone_limit > ZALLOC_ZONE_LIMIT)
368 slab->zone_limit = ZALLOC_ZONE_LIMIT;
369
370 slab->zone_page_cnt = slab->zone_size / RT_MM_PAGE_SIZE;
371
372 LOG_D("zone size 0x%x, zone page count 0x%x",
373 slab->zone_size, slab->zone_page_cnt);
374
375 /* allocate slab->memusage array */
376 limsize = npages * sizeof(struct rt_slab_memusage);
377 limsize = RT_ALIGN(limsize, RT_MM_PAGE_SIZE);
378 slab->memusage = rt_slab_page_alloc((rt_slab_t)(&slab->parent), limsize / RT_MM_PAGE_SIZE);
379
380 LOG_D("slab->memusage 0x%x, size 0x%x",
381 (rt_uintptr_t)slab->memusage, limsize);
382 return &slab->parent;
383}
384RTM_EXPORT(rt_slab_init);
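As a usage sketch (an editor's example, not part of slab.c; the buffer name and size are arbitrary), a slab object can be created over any sufficiently large memory region and then used as a private heap:

/* Editor's usage sketch of the public slab API. */
static rt_uint8_t slab_buffer[256 * 1024];

static void slab_demo(void)
{
    rt_slab_t heap;
    void *p;

    heap = rt_slab_init("demo", slab_buffer, sizeof(slab_buffer));
    if (heap == RT_NULL)
        return;

    p = rt_slab_alloc(heap, 200);           /* served from the 16-byte-chunk zone as 208 bytes */
    if (p != RT_NULL)
    {
        p = rt_slab_realloc(heap, p, 400);  /* may move to a larger chunk size */
        rt_slab_free(heap, p);
    }

    rt_slab_detach(heap);                   /* rt_slab_detach() is defined just below */
}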
385
393rt_err_t rt_slab_detach(rt_slab_t m)
394{
395 struct rt_slab *slab = (struct rt_slab *)m;
396
397 RT_ASSERT(slab != RT_NULL);
398 RT_ASSERT(rt_object_get_type(&slab->parent.parent) == RT_Object_Class_Memory);
399 RT_ASSERT(rt_object_is_systemobject(&slab->parent.parent));
400
401 rt_object_detach(&(slab->parent.parent));
402
403 return RT_EOK;
404}
405RTM_EXPORT(rt_slab_detach);
406
407/*
408 * Calculate the zone index for the allocation request size and set the
409 * allocation request size to that particular zone's chunk size.
410 */
411rt_inline int zoneindex(rt_size_t *bytes)
412{
413 /* unsigned for shift opt */
414 rt_uintptr_t n = (rt_uintptr_t)(*bytes);
415
416 if (n < 128)
417 {
418 *bytes = n = (n + 7) & ~7;
419
420 /* 8 byte chunks, 16 zones */
421 return (n / 8 - 1);
422 }
423 if (n < 256)
424 {
425 *bytes = n = (n + 15) & ~15;
426
427 return (n / 16 + 7);
428 }
429 if (n < 8192)
430 {
431 if (n < 512)
432 {
433 *bytes = n = (n + 31) & ~31;
434
435 return (n / 32 + 15);
436 }
437 if (n < 1024)
438 {
439 *bytes = n = (n + 63) & ~63;
440
441 return (n / 64 + 23);
442 }
443 if (n < 2048)
444 {
445 *bytes = n = (n + 127) & ~127;
446
447 return (n / 128 + 31);
448 }
449 if (n < 4096)
450 {
451 *bytes = n = (n + 255) & ~255;
452
453 return (n / 256 + 39);
454 }
455 *bytes = n = (n + 511) & ~511;
456
457 return (n / 512 + 47);
458 }
459 if (n < 16384)
460 {
461 *bytes = n = (n + 1023) & ~1023;
462
463 return (n / 1024 + 55);
464 }
465
466 rt_kprintf("Unexpected byte count %d", n);
467
468 return 0;
469}
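Since zoneindex() both selects the zone and rounds *bytes up to that zone's chunk size, a few spot checks against the table in the header comment (editor's sketch; it has to live in this translation unit because zoneindex() is file-local):

/* Editor's sketch: sanity checks for the zone index and rounding. */
static void zoneindex_selftest(void)
{
    rt_size_t sz;

    sz = 100;  RT_ASSERT(zoneindex(&sz) == 12 && sz == 104);   /* 8-byte chunking */
    sz = 200;  RT_ASSERT(zoneindex(&sz) == 20 && sz == 208);   /* 16-byte chunking */
    sz = 5000; RT_ASSERT(zoneindex(&sz) == 57 && sz == 5120);  /* 512-byte chunking */
}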
470
474
476
490void *rt_slab_alloc(rt_slab_t m, rt_size_t size)
491{
492 struct rt_slab_zone *z;
493 rt_int32_t zi;
494 struct rt_slab_chunk *chunk;
495 struct rt_slab_memusage *kup;
496 struct rt_slab *slab = (struct rt_slab *)m;
497
498 /* zero size, return RT_NULL */
499 if (size == 0)
500 return RT_NULL;
501
502 /*
503 * Handle large allocations directly. There should not be very many of
504 * these so performance is not a big issue.
505 */
506 if (size >= slab->zone_limit)
507 {
508 size = RT_ALIGN(size, RT_MM_PAGE_SIZE);
509
510 chunk = rt_slab_page_alloc(m, size >> RT_MM_PAGE_BITS);
511 if (chunk == RT_NULL)
512 return RT_NULL;
513
514 /* set kup */
515 kup = btokup(chunk);
516 kup->type = PAGE_TYPE_LARGE;
517 kup->size = size >> RT_MM_PAGE_BITS;
518
519 LOG_D("alloc a large memory 0x%x, page cnt %d, kup %d",
520 size,
521 size >> RT_MM_PAGE_BITS,
522 ((rt_uintptr_t)chunk - slab->heap_start) >> RT_MM_PAGE_BITS);
523 /* mem stat */
524 slab->parent.used += size;
525 if (slab->parent.used > slab->parent.max)
526 slab->parent.max = slab->parent.used;
527 return chunk;
528 }
529
530 /*
531 * Attempt to allocate out of an existing zone. First try the free list,
532 * then allocate out of unallocated space. If we find a good zone move
533 * it to the head of the list so later allocations find it quickly
534 * (we might have thousands of zones in the list).
535 *
536 * Note: zoneindex() will panic if the size is too large.
537 */
538 zi = zoneindex(&size);
539 RT_ASSERT(zi < RT_SLAB_NZONES);
540
541 LOG_D("try to alloc 0x%x on zone: %d", size, zi);
542
543 if ((z = slab->zone_array[zi]) != RT_NULL)
544 {
545 RT_ASSERT(z->z_nfree > 0);
546
547 /* Remove us from the zone_array[] when we become full */
548 if (--z->z_nfree == 0)
549 {
550 slab->zone_array[zi] = z->z_next;
551 z->z_next = RT_NULL;
552 }
553
554 /*
555 * No chunks are available but nfree said we had some memory, so
556 * it must be available in the never-before-used-memory area
557 * governed by uindex. The consequences are very serious if our zone
558 * got corrupted so we use an explicit rt_kprintf rather than a KASSERT.
559 */
560 if (z->z_uindex + 1 != z->z_nmax)
561 {
562 z->z_uindex = z->z_uindex + 1;
563 chunk = (struct rt_slab_chunk *)(z->z_baseptr + z->z_uindex * size);
564 }
565 else
566 {
567 /* find on free chunk list */
568 chunk = z->z_freechunk;
569
570 /* remove this chunk from list */
571 z->z_freechunk = z->z_freechunk->c_next;
572 }
573 /* mem stats */
574 slab->parent.used += z->z_chunksize;
575 if (slab->parent.used > slab->parent.max)
576 slab->parent.max = slab->parent.used;
577
578 return chunk;
579 }
580
581 /*
582 * If all zones are exhausted we need to allocate a new zone for this
583 * index.
584 *
585 * At least one subsystem, the tty code (see CROUND) expects power-of-2
586 * allocations to be power-of-2 aligned. We maintain compatibility by
587 * adjusting the base offset below.
588 */
589 {
590 rt_uint32_t off;
591
592 if ((z = slab->zone_free) != RT_NULL)
593 {
594 /* remove zone from free zone list */
595 slab->zone_free = z->z_next;
596 -- slab->zone_free_cnt;
597 }
598 else
599 {
600 /* allocate a zone from page */
601 z = rt_slab_page_alloc(m, slab->zone_size / RT_MM_PAGE_SIZE);
602 if (z == RT_NULL)
603 {
604 return RT_NULL;
605 }
606
607 LOG_D("alloc a new zone: 0x%x",
608 (rt_uintptr_t)z);
609
610 /* set message usage */
611 for (off = 0, kup = btokup(z); off < slab->zone_page_cnt; off ++)
612 {
613 kup->type = PAGE_TYPE_SMALL;
614 kup->size = off;
615
616 kup ++;
617 }
618 }
619
620 /* clear to zero */
621 rt_memset(z, 0, sizeof(struct rt_slab_zone));
622
623 /* offset of slab zone struct in zone */
624 off = sizeof(struct rt_slab_zone);
625
626 /*
627 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
628 * Otherwise just 8-byte align the data.
629 */
630 if ((size | (size - 1)) + 1 == (size << 1))
631 off = (off + size - 1) & ~(size - 1);
632 else
633 off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
634
635 z->z_magic = ZALLOC_SLAB_MAGIC;
636 z->z_zoneindex = zi;
637 z->z_nmax = (slab->zone_size - off) / size;
638 z->z_nfree = z->z_nmax - 1;
639 z->z_baseptr = (rt_uint8_t *)z + off;
640 z->z_uindex = 0;
641 z->z_chunksize = size;
642
643 chunk = (struct rt_slab_chunk *)(z->z_baseptr + z->z_uindex * size);
644
645 /* link to zone array */
646 z->z_next = slab->zone_array[zi];
647 slab->zone_array[zi] = z;
648 /* mem stats */
649 slab->parent.used += z->z_chunksize;
650 if (slab->parent.used > slab->parent.max)
651 slab->parent.max = slab->parent.used;
652 }
653
654 return chunk;
655}
656RTM_EXPORT(rt_slab_alloc);
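One property called out in the header comment is that power-of-2 sized requests come back power-of-2 aligned, because the chunk array base is aligned to the chunk size for such sizes. A quick check of that property for a small size (editor's sketch; `heap` assumed to come from rt_slab_init()):

/* Editor's sketch: power-of-2 sized allocations are power-of-2 aligned. */
static void alignment_demo(rt_slab_t heap)
{
    void *p = rt_slab_alloc(heap, 256);

    if (p != RT_NULL)
    {
        RT_ASSERT(((rt_uintptr_t)p & (256 - 1)) == 0);  /* 256-byte aligned */
        rt_slab_free(heap, p);
    }
}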
657
669void *rt_slab_realloc(rt_slab_t m, void *ptr, rt_size_t size)
670{
671 void *nptr;
672 struct rt_slab_zone *z;
673 struct rt_slab_memusage *kup;
674 struct rt_slab *slab = (struct rt_slab *)m;
675
676 if (ptr == RT_NULL)
677 return rt_slab_alloc(m, size);
678
679 if (size == 0)
680 {
681 rt_slab_free(m, ptr);
682 return RT_NULL;
683 }
684
685 /*
686 * Get the original allocation's zone. If the new request winds up
687 * using the same chunk size we do not have to do anything.
688 */
689 kup = btokup((rt_uintptr_t)ptr & ~RT_MM_PAGE_MASK);
690 if (kup->type == PAGE_TYPE_LARGE)
691 {
692 rt_size_t osize;
693
694 osize = kup->size << RT_MM_PAGE_BITS;
695 if ((nptr = rt_slab_alloc(m, size)) == RT_NULL)
696 return RT_NULL;
697 rt_memcpy(nptr, ptr, size > osize ? osize : size);
698 rt_slab_free(m, ptr);
699
700 return nptr;
701 }
702 else if (kup->type == PAGE_TYPE_SMALL)
703 {
704 z = (struct rt_slab_zone *)(((rt_uintptr_t)ptr & ~RT_MM_PAGE_MASK) -
705 kup->size * RT_MM_PAGE_SIZE);
706 RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
707
708 zoneindex(&size);
709 if (z->z_chunksize == size)
710 return (ptr); /* same chunk */
711
712 /*
713 * Allocate memory for the new request size. Note that zoneindex has
714 * already adjusted the request size to the appropriate chunk size, which
715 * should optimize our bcopy(). Then copy and return the new pointer.
716 */
717 if ((nptr = rt_slab_alloc(m, size)) == RT_NULL)
718 return RT_NULL;
719
720 rt_memcpy(nptr, ptr, size > z->z_chunksize ? z->z_chunksize : size);
721 rt_slab_free(m, ptr);
722
723 return nptr;
724 }
725
726 return RT_NULL;
727}
728RTM_EXPORT(rt_slab_realloc);
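rt_slab_realloc() follows the usual realloc() conventions: a RT_NULL pointer degenerates to rt_slab_alloc(), size 0 degenerates to rt_slab_free(), and a request that rounds to the same chunk size returns the original pointer unchanged. A short sketch (editor's example; `heap` assumed to come from rt_slab_init()):

/* Editor's sketch of the realloc conventions implemented above. */
static void realloc_demo(rt_slab_t heap)
{
    void *p;

    p = rt_slab_realloc(heap, RT_NULL, 200);  /* acts like rt_slab_alloc(heap, 200): 208-byte chunk */
    p = rt_slab_realloc(heap, p, 205);        /* 205 also rounds to 208: same pointer, no copy */
    p = rt_slab_realloc(heap, p, 4000);       /* larger chunk size: allocate, copy, free the old block */
    p = rt_slab_realloc(heap, p, 0);          /* acts like rt_slab_free(), returns RT_NULL */
}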
729
738void rt_slab_free(rt_slab_t m, void *ptr)
739{
740 struct rt_slab_zone *z;
741 struct rt_slab_chunk *chunk;
742 struct rt_slab_memusage *kup;
743 struct rt_slab *slab = (struct rt_slab *)m;
744
745 /* free a RT_NULL pointer */
746 if (ptr == RT_NULL)
747 return ;
748
749 /* get memory usage */
750#if (DBG_LVL == DBG_LOG)
751 {
752 rt_uintptr_t addr = ((rt_uintptr_t)ptr & ~RT_MM_PAGE_MASK); /* page-aligned base of ptr */
753 LOG_D("free a memory 0x%x and align to 0x%x, kup index %d",
754 (rt_uintptr_t)ptr,
755 (rt_uintptr_t)addr,
756 ((rt_uintptr_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS);
757 }
758#endif /* DBG_LVL == DBG_LOG */
759
760 kup = btokup((rt_uintptr_t)ptr & ~RT_MM_PAGE_MASK);
761 /* release large allocation */
762 if (kup->type == PAGE_TYPE_LARGE)
763 {
764 rt_uintptr_t size;
765
766 /* clear page counter */
767 size = kup->size;
768 kup->size = 0;
769 /* mem stats */
770 slab->parent.used -= size * RT_MM_PAGE_SIZE;
771
772 LOG_D("free large memory block 0x%x, page count %d",
773 (rt_uintptr_t)ptr, size);
774
775 /* free this page */
776 rt_slab_page_free(m, ptr, size);
777
778 return;
779 }
780
781 /* zone case. get out zone. */
782 z = (struct rt_slab_zone *)(((rt_uintptr_t)ptr & ~RT_MM_PAGE_MASK) -
783 kup->size * RT_MM_PAGE_SIZE);
784 RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);
785
786 chunk = (struct rt_slab_chunk *)ptr;
787 chunk->c_next = z->z_freechunk;
788 z->z_freechunk = chunk;
789 /* mem stats */
790 slab->parent.used -= z->z_chunksize;
791
792 /*
793 * Bump the number of free chunks. If it becomes non-zero the zone
794 * must be added back onto the appropriate list.
795 */
796 if (z->z_nfree++ == 0)
797 {
798 z->z_next = slab->zone_array[z->z_zoneindex];
799 slab->zone_array[z->z_zoneindex] = z;
800 }
801
802 /*
803 * If the zone becomes totally free, and there are other zones we
804 * can allocate from, move this zone to the FreeZones list. Since
805 * this code can be called from an IPI callback, do *NOT* try to mess
806 * with kernel_map here. Hysteresis will be performed at malloc() time.
807 */
808 if (z->z_nfree == z->z_nmax &&
809 (z->z_next || slab->zone_array[z->z_zoneindex] != z))
810 {
811 struct rt_slab_zone **pz;
812
813 LOG_D("free zone %#x, zoneindex %d",
814 (rt_uintptr_t)z, z->z_zoneindex);
815
816 /* remove zone from zone array list */
817 for (pz = &slab->zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)
818 ;
819 *pz = z->z_next;
820
821 /* reset zone */
822 z->z_magic = RT_UINT32_MAX;
823
824 /* insert to free zone list */
825 z->z_next = slab->zone_free;
826 slab->zone_free = z;
827
828 ++ slab->zone_free_cnt;
829
830 /* release zone to page allocator */
831 if (slab->zone_free_cnt > ZONE_RELEASE_THRESH)
832 {
833 register rt_uint32_t i;
834
835 z = slab->zone_free;
836 slab->zone_free = z->z_next;
837 -- slab->zone_free_cnt;
838
839 /* set message usage */
840 for (i = 0, kup = btokup(z); i < slab->zone_page_cnt; i ++)
841 {
842 kup->type = PAGE_TYPE_FREE;
843 kup->size = 0;
844 kup ++;
845 }
846
847 /* release pages */
848 rt_slab_page_free(m, z, slab->zone_size / RT_MM_PAGE_SIZE);
849
850 return;
851 }
852 }
853}
854RTM_EXPORT(rt_slab_free);
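The zone hysteresis above means a burst of frees does not immediately return pages: a fully empty zone is parked on zone_free, and pages are only handed back to the page allocator once more than ZONE_RELEASE_THRESH zones are idle. A pattern that exercises that path (editor's sketch; `heap` assumed to come from rt_slab_init()):

/* Editor's sketch: fill several zones with 1KB chunks, then drain them. */
static void zone_recycle_demo(rt_slab_t heap)
{
    void *p[128];
    int i;

    for (i = 0; i < 128; i++)
        p[i] = rt_slab_alloc(heap, 1024);   /* spreads over several 1KB-chunk zones */

    for (i = 0; i < 128; i++)
        rt_slab_free(heap, p[i]);           /* emptied zones collect on zone_free; beyond
                                             * ZONE_RELEASE_THRESH they are released via
                                             * rt_slab_page_free() */
}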
855
856#endif /* RT_USING_SLAB */