#define DBG_TAG           "kernel.slab"
#define DBG_LVL           DBG_INFO

#define ZALLOC_SLAB_MAGIC       0x51ab51ab
#define ZALLOC_ZONE_LIMIT       (16 * 1024)     /* upper cap on the per-zone allocation limit */
#define ZALLOC_MIN_ZONE_SIZE    (32 * 1024)     /* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE    (128 * 1024)    /* maximum zone size */
#define ZONE_RELEASE_THRESH     2               /* free zones cached before pages are returned */

#define MIN_CHUNK_SIZE          8               /* minimum chunk size and alignment */
#define MIN_CHUNK_MASK          (MIN_CHUNK_SIZE - 1)

/* per-page usage types recorded in slab->memusage */
#define PAGE_TYPE_FREE          0x00
#define PAGE_TYPE_SMALL         0x01            /* page belongs to a zone of small chunks */
#define PAGE_TYPE_LARGE         0x02            /* page belongs to a page-granular (large) allocation */

/* map an address inside the heap to its per-page memusage entry */
#define btokup(addr) \
    (&slab->memusage[((rt_uintptr_t)(addr) - slab->heap_start) >> RT_MM_PAGE_BITS])
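/*
 * Worked illustration of btokup(): assuming RT_MM_PAGE_BITS is the page
 * shift (e.g. 12 for 4 KiB pages, an assumption here), an address three
 * pages above slab->heap_start resolves to &slab->memusage[3].
 */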
/* fields of struct rt_slab_zone */
    struct rt_slab_zone *z_next;        /* zones with free chunks are chained per size class */
    struct rt_slab_chunk *z_freechunk;  /* list of freed chunks available for reuse */

/* field of struct rt_slab_chunk: a free chunk simply links to the next one */
    struct rt_slab_chunk *c_next;

struct rt_slab_memusage

/* field of struct rt_slab_page */
    struct rt_slab_page *next;          /* next block on the free page list */

#define RT_SLAB_NZONES                  72      /* number of zone size classes */

/* fields of struct rt_slab, the slab heap object */
    struct rt_memory parent;                            /* inherit from rt_memory */
    struct rt_slab_memusage *memusage;                  /* one entry per heap page */
    struct rt_slab_zone *zone_array[RT_SLAB_NZONES];    /* zones with free chunks, per size class */
    struct rt_slab_zone *zone_free;                     /* fully free zones kept cached */
    struct rt_slab_page *page_list;                     /* free page list */
void *rt_slab_page_alloc(rt_slab_t m, rt_size_t npages)
    struct rt_slab_page *b, *n;
    struct rt_slab_page **prev;
    struct rt_slab *slab = (struct rt_slab *)m;

    /* first-fit scan of the free page list */
    for (prev = &slab->page_list; (b = *prev) != RT_NULL; prev = &(b->next))
        if (b->page > npages)
            /* larger than needed: split the block, the remainder stays on the list */
            n->page = b->page - npages;

        if (b->page == npages)
            /* exact fit: the whole block is unlinked */
void rt_slab_page_free(rt_slab_t m, void *addr, rt_size_t npages)
    struct rt_slab_page *b, *n;
    struct rt_slab_page **prev;
    struct rt_slab *slab = (struct rt_slab *)m;

    n = (struct rt_slab_page *)addr;

    for (prev = &slab->page_list; (b = *prev) != RT_NULL; prev = &(b->next))
        if (b + b->page == n)
            /* freed block directly follows b: absorb it into b ... */
            if (b + (b->page += npages) == b->next)
                /* ... and merge with the next block too if they now touch */
                b->page += b->next->page;
                b->next = b->next->next;

        /* freed block directly precedes b: the freed block absorbs b */
        n->page = b->page + npages;
static void rt_slab_page_init(struct rt_slab *slab, void *addr, rt_size_t npages)
    /* seed the page allocator: the whole heap starts out as one free block */
    rt_slab_page_free((rt_slab_t)(&slab->parent), addr, npages);
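/*
 * Illustrative sketch of the page-level interface above. It assumes a heap
 * handle already obtained from rt_slab_init(); the function name and page
 * count are examples only, and error handling is omitted.
 */
static void slab_page_example(rt_slab_t heap)
{
    void *pages = rt_slab_page_alloc(heap, 4);  /* take 4 contiguous pages from the free list */

    if (pages != RT_NULL)
        rt_slab_page_free(heap, pages, 4);      /* hand them back; adjacent free blocks coalesce */
}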
rt_slab_t rt_slab_init(const char *name, void *begin_addr, rt_size_t size)
    struct rt_slab *slab;

    if (begin_align >= end_align)
        rt_kprintf("slab init error. wrong address [0x%x - 0x%x]\n",

    limsize = end_align - begin_align;

    LOG_D("heap[0x%x - 0x%x], size 0x%x, 0x%x pages",
          begin_align, end_align, limsize, npages);

    rt_memset(slab, 0, sizeof(*slab));

    /* fill in the common rt_memory header */
    slab->parent.algorithm = "slab";
    slab->parent.address   = begin_align;
    slab->parent.total     = limsize;
    slab->parent.used      = 0;
    slab->parent.max       = 0;
    slab->heap_start       = begin_align;
    slab->heap_end         = end_align;

    /* the whole heap initially goes to the page allocator */
    rt_slab_page_init(slab, (void *)slab->heap_start, npages);

    /* pick a zone size that grows with the heap, clamped to [32 KB, 128 KB] */
    slab->zone_size = ZALLOC_MIN_ZONE_SIZE;
    while (slab->zone_size < ZALLOC_MAX_ZONE_SIZE && (slab->zone_size << 1) < (limsize / 1024))
        slab->zone_size <<= 1;

    /* requests of zone_limit bytes or more bypass zones and use whole pages */
    slab->zone_limit = slab->zone_size / 4;
    if (slab->zone_limit > ZALLOC_ZONE_LIMIT)
        slab->zone_limit = ZALLOC_ZONE_LIMIT;

    LOG_D("zone size 0x%x, zone page count 0x%x",
          slab->zone_size, slab->zone_page_cnt);

    /* allocate the per-page usage array from the heap itself */
    limsize = npages * sizeof(struct rt_slab_memusage);
    slab->memusage = rt_slab_page_alloc((rt_slab_t)(&slab->parent), limsize / RT_MM_PAGE_SIZE);

    LOG_D("slab->memusage 0x%x, size 0x%x",

    return &slab->parent;
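/*
 * Illustrative usage sketch of the allocator as a whole: set up a slab heap
 * on a statically reserved buffer, then allocate, resize and free a block.
 * The pool name, buffer and sizes are examples only; error handling is
 * omitted for brevity.
 */
static rt_uint8_t slab_example_pool[256 * 1024];

static void slab_example(void)
{
    rt_slab_t heap = rt_slab_init("demo", slab_example_pool, sizeof(slab_example_pool));
    void *p;

    p = rt_slab_alloc(heap, 100);        /* small request, served from a zone chunk */
    p = rt_slab_realloc(heap, p, 4096);  /* may move to another size class; contents are copied */
    rt_slab_free(heap, p);               /* the chunk returns to its zone's free list */
}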
    struct rt_slab *slab = (struct rt_slab *)m;
    /* each band rounds the request up to its granularity, writes the rounded
     * size back through *bytes and returns the zone (size class) index */
    *bytes = n = (n + 7) & ~7;

    *bytes = n = (n + 15) & ~15;

    *bytes = n = (n + 31) & ~31;
    return (n / 32 + 15);

    *bytes = n = (n + 63) & ~63;
    return (n / 64 + 23);

    *bytes = n = (n + 127) & ~127;
    return (n / 128 + 31);

    *bytes = n = (n + 255) & ~255;
    return (n / 256 + 39);

    *bytes = n = (n + 511) & ~511;
    return (n / 512 + 47);

    *bytes = n = (n + 1023) & ~1023;
    return (n / 1024 + 55);
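/*
 * Worked example for the table above: a 300-byte request is handled by the
 * 32-byte band, rounded up to (300 + 31) & ~31 = 320, and mapped to zone
 * index 320 / 32 + 15 = 25, so every request between 289 and 320 bytes
 * shares the same zone and chunk size.
 */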
void *rt_slab_alloc(rt_slab_t m, rt_size_t size)
    struct rt_slab_zone *z;
    struct rt_slab_chunk *chunk;
    struct rt_slab_memusage *kup;
    struct rt_slab *slab = (struct rt_slab *)m;

    /* large request: bypass zones, allocate whole pages and mark them */
    if (size >= slab->zone_limit)
        kup->type = PAGE_TYPE_LARGE;

        LOG_D("alloc a large memory 0x%x, page cnt %d, kup %d",

        slab->parent.used += size;
        if (slab->parent.used > slab->parent.max)
            slab->parent.max = slab->parent.used;

    /* small request: round it up and pick the matching size class */
    zi = zoneindex(&size);

    LOG_D("try to alloc 0x%x on zone: %d", size, zi);

    if ((z = slab->zone_array[zi]) != RT_NULL)
        /* the zone's last free chunk is being taken: drop it from the class list */
        if (--z->z_nfree == 0)
            slab->zone_array[zi] = z->z_next;

        /* prefer never-used chunks, carved sequentially through z_uindex ... */
        if (z->z_uindex + 1 != z->z_nmax)
            z->z_uindex = z->z_uindex + 1;
            chunk = (struct rt_slab_chunk *)(z->z_baseptr + z->z_uindex * size);

        /* ... otherwise reuse a chunk from the zone's free list */
            chunk = z->z_freechunk;
            z->z_freechunk = z->z_freechunk->c_next;

        slab->parent.used += z->z_chunksize;
        if (slab->parent.used > slab->parent.max)
            slab->parent.max = slab->parent.used;

    /* no zone with free chunks: reuse a cached free zone if one is available */
    if ((z = slab->zone_free) != RT_NULL)
        slab->zone_free = z->z_next;
        -- slab->zone_free_cnt;

    LOG_D("alloc a new zone: 0x%x",

    /* mark all pages of the new zone as small-chunk pages */
    for (off = 0, kup = btokup(z); off < slab->zone_page_cnt; off ++)
        kup->type = PAGE_TYPE_SMALL;

    rt_memset(z, 0, sizeof(struct rt_slab_zone));

    /* chunks start after the zone header; align the offset to the chunk size
     * when it is a power of two, otherwise to MIN_CHUNK_SIZE */
    off = sizeof(struct rt_slab_zone);
    if ((size | (size - 1)) + 1 == (size << 1))
        off = (off + size - 1) & ~(size - 1);
    else
        off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;

    z->z_magic     = ZALLOC_SLAB_MAGIC;
    z->z_nmax      = (slab->zone_size - off) / size;
    z->z_nfree     = z->z_nmax - 1;    /* the first chunk is handed out right away */
    z->z_chunksize = size;

    chunk = (struct rt_slab_chunk *)(z->z_baseptr + z->z_uindex * size);

    /* publish the new zone at the head of its size-class list */
    z->z_next = slab->zone_array[zi];
    slab->zone_array[zi] = z;

    slab->parent.used += z->z_chunksize;
    if (slab->parent.used > slab->parent.max)
        slab->parent.max = slab->parent.used;
void *rt_slab_realloc(rt_slab_t m, void *ptr, rt_size_t size)
    struct rt_slab_zone *z;
    struct rt_slab_memusage *kup;
    struct rt_slab *slab = (struct rt_slab *)m;

    /* with no old block this degenerates to a plain allocation */
        return rt_slab_alloc(m, size);

    /* a zero-size request just releases the old block */
        rt_slab_free(m, ptr);

    if (kup->type == PAGE_TYPE_LARGE)
        /* large block: allocate anew, copy the smaller of the two sizes, free the old block */
        if ((nptr = rt_slab_alloc(m, size)) == RT_NULL)

        rt_memcpy(nptr, ptr, size > osize ? osize : size);
        rt_slab_free(m, ptr);
    else if (kup->type == PAGE_TYPE_SMALL)
        RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

        /* same chunk size: the existing chunk can be returned as-is */
        if (z->z_chunksize == size)

        if ((nptr = rt_slab_alloc(m, size)) == RT_NULL)

        rt_memcpy(nptr, ptr, size > z->z_chunksize ? z->z_chunksize : size);
        rt_slab_free(m, ptr);
void rt_slab_free(rt_slab_t m, void *ptr)
    struct rt_slab_zone *z;
    struct rt_slab_chunk *chunk;
    struct rt_slab_memusage *kup;
    struct rt_slab *slab = (struct rt_slab *)m;

#if (DBG_LVL == DBG_LOG)
    LOG_D("free a memory 0x%x and align to 0x%x, kup index %d",

    /* a large block simply gives its pages back to the page allocator */
    if (kup->type == PAGE_TYPE_LARGE)
        LOG_D("free large memory block 0x%x, page count %d",

        rt_slab_page_free(m, ptr, size);

    /* otherwise the pointer is a zone chunk */
    RT_ASSERT(z->z_magic == ZALLOC_SLAB_MAGIC);

    /* push the chunk onto the zone's free list */
    chunk = (struct rt_slab_chunk *)ptr;
    chunk->c_next = z->z_freechunk;
    z->z_freechunk = chunk;

    slab->parent.used -= z->z_chunksize;

    /* the zone had no free chunk before: put it back on its size-class list */
    if (z->z_nfree++ == 0)
        z->z_next = slab->zone_array[z->z_zoneindex];
        slab->zone_array[z->z_zoneindex] = z;

    /* the zone is now completely free and is not the only zone of its class:
     * unlink it and cache it on the free-zone list */
    if (z->z_nfree == z->z_nmax &&
        (z->z_next || slab->zone_array[z->z_zoneindex] != z))
        struct rt_slab_zone **pz;

        LOG_D("free zone %#x, zoneindex %d",

        /* remove the zone from its size-class list */
        for (pz = &slab->zone_array[z->z_zoneindex]; z != *pz; pz = &(*pz)->z_next)

        z->z_next = slab->zone_free;
        ++ slab->zone_free_cnt;

        /* too many cached zones: release one back to the page allocator */
        if (slab->zone_free_cnt > ZONE_RELEASE_THRESH)
            slab->zone_free = z->z_next;
            -- slab->zone_free_cnt;

            /* its pages become plain free pages again */
            for (i = 0, kup = btokup(z); i < slab->zone_page_cnt; i ++)
                kup->type = PAGE_TYPE_FREE;