| Old | New | Line |
|---|---|---|
| .. | .. | |
| 19 | 19 | |
| 20 | 20 | struct kmem_cache { |
| 21 | 21 | pthread_mutex_t lock; |
| 22 | | - int size; |
| | 22 | + unsigned int size; |
| | 23 | + unsigned int align; |
| 23 | 24 | int nr_objs; |
| 24 | 25 | void *objs; |
| 25 | 26 | void (*ctor)(void *); |
| 26 | 27 | }; |
| 27 | 28 | |
| 28 | | -void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) |
| | 29 | +void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp) |
| 29 | 30 | { |
| 30 | | - struct radix_tree_node *node; |
| | 31 | + void *p; |
| 31 | 32 | |
| 32 | | - if (!(flags & __GFP_DIRECT_RECLAIM)) |
| | 33 | + if (!(gfp & __GFP_DIRECT_RECLAIM)) |
| 33 | 34 | return NULL; |
| 34 | 35 | |
| 35 | 36 | pthread_mutex_lock(&cachep->lock); |
| 36 | 37 | if (cachep->nr_objs) { |
| | 38 | + struct radix_tree_node *node = cachep->objs; |
| 37 | 39 | cachep->nr_objs--; |
| 38 | | - node = cachep->objs; |
| 39 | 40 | cachep->objs = node->parent; |
| 40 | 41 | pthread_mutex_unlock(&cachep->lock); |
| 41 | 42 | node->parent = NULL; |
| | 43 | + p = node; |
| 42 | 44 | } else { |
| 43 | 45 | pthread_mutex_unlock(&cachep->lock); |
| 44 | | - node = malloc(cachep->size); |
| | 46 | + if (cachep->align) |
| | 47 | + posix_memalign(&p, cachep->align, cachep->size); |
| | 48 | + else |
| | 49 | + p = malloc(cachep->size); |
| 45 | 50 | if (cachep->ctor) |
| 46 | | - cachep->ctor(node); |
| | 51 | + cachep->ctor(p); |
| | 52 | + else if (gfp & __GFP_ZERO) |
| | 53 | + memset(p, 0, cachep->size); |
| 47 | 54 | } |
| 48 | 55 | |
| 49 | 56 | uatomic_inc(&nr_allocated); |
| 50 | 57 | if (kmalloc_verbose) |
| 51 | | - printf("Allocating %p from slab\n", node); |
| 52 | | - return node; |
| | 58 | + printf("Allocating %p from slab\n", p); |
| | 59 | + return p; |
| 53 | 60 | } |
| 54 | 61 | |
| 55 | 62 | void kmem_cache_free(struct kmem_cache *cachep, void *objp) |
| .. | .. | |
| 59 | 66 | if (kmalloc_verbose) |
| 60 | 67 | printf("Freeing %p to slab\n", objp); |
| 61 | 68 | pthread_mutex_lock(&cachep->lock); |
| 62 | | - if (cachep->nr_objs > 10) { |
| | 69 | + if (cachep->nr_objs > 10 \|\| cachep->align) { |
| 63 | 70 | memset(objp, POISON_FREE, cachep->size); |
| 64 | 71 | free(objp); |
| 65 | 72 | } else { |
| .. | .. | |
| 98 | 105 | } |
| 99 | 106 | |
| 100 | 107 | struct kmem_cache * |
| 101 | | -kmem_cache_create(const char *name, size_t size, size_t offset, |
| 102 | | - unsigned long flags, void (*ctor)(void *)) |
| | 108 | +kmem_cache_create(const char *name, unsigned int size, unsigned int align, |
| | 109 | + unsigned int flags, void (*ctor)(void *)) |
| 103 | 110 | { |
| 104 | 111 | struct kmem_cache *ret = malloc(sizeof(*ret)); |
| 105 | 112 | |
| 106 | 113 | pthread_mutex_init(&ret->lock, NULL); |
| 107 | 114 | ret->size = size; |
| | 115 | + ret->align = align; |
| 108 | 116 | ret->nr_objs = 0; |
| 109 | 117 | ret->objs = NULL; |
| 110 | 118 | ret->ctor = ctor; |