Merge branch 'slab/next' into slab/for-linus
Fix up a trivial merge conflict with commit baaf1dd ("mm/slob: use
min_t() to compare ARCH_SLAB_MINALIGN") that did not go through the slab
tree.
Conflicts:
mm/slob.c
Signed-off-by: Pekka Enberg <penberg@kernel.org>
This commit is contained in:
commit
08afe22c68
8 changed files with 236 additions and 333 deletions
|
|
@@ -128,10 +128,7 @@ struct page {
|
|||
};
|
||||
|
||||
struct list_head list; /* slobs list of pages */
|
||||
struct { /* slab fields */
|
||||
struct kmem_cache *slab_cache;
|
||||
struct slab *slab_page;
|
||||
};
|
||||
struct slab *slab_page; /* slab fields */
|
||||
};
|
||||
|
||||
/* Remainder is not double word aligned */
|
||||
|
|
@@ -146,7 +143,7 @@ struct page {
|
|||
#if USE_SPLIT_PTLOCKS
|
||||
spinlock_t ptl;
|
||||
#endif
|
||||
struct kmem_cache *slab; /* SLUB: Pointer to slab */
|
||||
struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
|
||||
struct page *first_page; /* Compound tail pages */
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@@ -128,7 +128,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
|
|||
void kmem_cache_destroy(struct kmem_cache *);
|
||||
int kmem_cache_shrink(struct kmem_cache *);
|
||||
void kmem_cache_free(struct kmem_cache *, void *);
|
||||
unsigned int kmem_cache_size(struct kmem_cache *);
|
||||
|
||||
/*
|
||||
* Please use this macro to create slab caches. Simply specify the
|
||||
|
|
@@ -388,6 +387,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
|
|||
return kmalloc_node(size, flags | __GFP_ZERO, node);
|
||||
}
|
||||
|
||||
/*
|
||||
* Determine the size of a slab object
|
||||
*/
|
||||
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
|
||||
{
|
||||
return s->object_size;
|
||||
}
|
||||
|
||||
void __init kmem_cache_init_late(void);
|
||||
|
||||
#endif /* _LINUX_SLAB_H */
|
||||
|
|
|
|||
|
|
@@ -89,9 +89,13 @@ struct kmem_cache {
|
|||
* (see kmem_cache_init())
|
||||
* We still use [NR_CPUS] and not [1] or [0] because cache_cache
|
||||
* is statically defined, so we reserve the max number of cpus.
|
||||
*
|
||||
* We also need to guarantee that the list is able to accommodate a
|
||||
* pointer for each node since "nodelists" uses the remainder of
|
||||
* available pointers.
|
||||
*/
|
||||
struct kmem_list3 **nodelists;
|
||||
struct array_cache *array[NR_CPUS];
|
||||
struct array_cache *array[NR_CPUS + MAX_NUMNODES];
|
||||
/*
|
||||
* Do not add fields after array[]
|
||||
*/
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue