mm, mempool: do not allow atomic resizing
Allocating a large number of elements in atomic context could quickly deplete memory reserves, so just disallow atomic resizing entirely. Nothing currently uses mempool_resize() with anything other than GFP_KERNEL, so convert existing callers to drop the gfp_mask. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: David Rientjes <rientjes@google.com> Acked-by: Steffen Maier <maier@linux.vnet.ibm.com> [zfcp] Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Steve French <sfrench@samba.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
2415b9f5cb
commit
11d8336045
4 changed files with 11 additions and 11 deletions
|
@@ -738,11 +738,11 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
|
||||||
return ZFCP_ERP_FAILED;
|
return ZFCP_ERP_FAILED;
|
||||||
|
|
||||||
if (mempool_resize(act->adapter->pool.sr_data,
|
if (mempool_resize(act->adapter->pool.sr_data,
|
||||||
act->adapter->stat_read_buf_num, GFP_KERNEL))
|
act->adapter->stat_read_buf_num))
|
||||||
return ZFCP_ERP_FAILED;
|
return ZFCP_ERP_FAILED;
|
||||||
|
|
||||||
if (mempool_resize(act->adapter->pool.status_read_req,
|
if (mempool_resize(act->adapter->pool.status_read_req,
|
||||||
act->adapter->stat_read_buf_num, GFP_KERNEL))
|
act->adapter->stat_read_buf_num))
|
||||||
return ZFCP_ERP_FAILED;
|
return ZFCP_ERP_FAILED;
|
||||||
|
|
||||||
atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
|
atomic_set(&act->adapter->stat_miss, act->adapter->stat_read_buf_num);
|
||||||
|
|
|
@@ -773,8 +773,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
|
||||||
|
|
||||||
length = atomic_dec_return(&tcpSesAllocCount);
|
length = atomic_dec_return(&tcpSesAllocCount);
|
||||||
if (length > 0)
|
if (length > 0)
|
||||||
mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
|
mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
|
||||||
GFP_KERNEL);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
|
@@ -848,8 +847,7 @@ cifs_demultiplex_thread(void *p)
|
||||||
|
|
||||||
length = atomic_inc_return(&tcpSesAllocCount);
|
length = atomic_inc_return(&tcpSesAllocCount);
|
||||||
if (length > 1)
|
if (length > 1)
|
||||||
mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
|
mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
|
||||||
GFP_KERNEL);
|
|
||||||
|
|
||||||
set_freezable();
|
set_freezable();
|
||||||
while (server->tcpStatus != CifsExiting) {
|
while (server->tcpStatus != CifsExiting) {
|
||||||
|
|
|
@@ -29,7 +29,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
|
||||||
mempool_free_t *free_fn, void *pool_data,
|
mempool_free_t *free_fn, void *pool_data,
|
||||||
gfp_t gfp_mask, int nid);
|
gfp_t gfp_mask, int nid);
|
||||||
|
|
||||||
extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
|
extern int mempool_resize(mempool_t *pool, int new_min_nr);
|
||||||
extern void mempool_destroy(mempool_t *pool);
|
extern void mempool_destroy(mempool_t *pool);
|
||||||
extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
|
extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
|
||||||
extern void mempool_free(void *element, mempool_t *pool);
|
extern void mempool_free(void *element, mempool_t *pool);
|
||||||
|
|
10
mm/mempool.c
10
mm/mempool.c
|
@@ -113,23 +113,24 @@ EXPORT_SYMBOL(mempool_create_node);
|
||||||
* mempool_create().
|
* mempool_create().
|
||||||
* @new_min_nr: the new minimum number of elements guaranteed to be
|
* @new_min_nr: the new minimum number of elements guaranteed to be
|
||||||
* allocated for this pool.
|
* allocated for this pool.
|
||||||
* @gfp_mask: the usual allocation bitmask.
|
|
||||||
*
|
*
|
||||||
* This function shrinks/grows the pool. In the case of growing,
|
* This function shrinks/grows the pool. In the case of growing,
|
||||||
* it cannot be guaranteed that the pool will be grown to the new
|
* it cannot be guaranteed that the pool will be grown to the new
|
||||||
* size immediately, but new mempool_free() calls will refill it.
|
* size immediately, but new mempool_free() calls will refill it.
|
||||||
|
* This function may sleep.
|
||||||
*
|
*
|
||||||
* Note, the caller must guarantee that no mempool_destroy is called
|
* Note, the caller must guarantee that no mempool_destroy is called
|
||||||
* while this function is running. mempool_alloc() & mempool_free()
|
* while this function is running. mempool_alloc() & mempool_free()
|
||||||
* might be called (eg. from IRQ contexts) while this function executes.
|
* might be called (eg. from IRQ contexts) while this function executes.
|
||||||
*/
|
*/
|
||||||
int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
|
int mempool_resize(mempool_t *pool, int new_min_nr)
|
||||||
{
|
{
|
||||||
void *element;
|
void *element;
|
||||||
void **new_elements;
|
void **new_elements;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
BUG_ON(new_min_nr <= 0);
|
BUG_ON(new_min_nr <= 0);
|
||||||
|
might_sleep();
|
||||||
|
|
||||||
spin_lock_irqsave(&pool->lock, flags);
|
spin_lock_irqsave(&pool->lock, flags);
|
||||||
if (new_min_nr <= pool->min_nr) {
|
if (new_min_nr <= pool->min_nr) {
|
||||||
|
@@ -145,7 +146,8 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
|
||||||
spin_unlock_irqrestore(&pool->lock, flags);
|
spin_unlock_irqrestore(&pool->lock, flags);
|
||||||
|
|
||||||
/* Grow the pool */
|
/* Grow the pool */
|
||||||
new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
|
new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
|
||||||
|
GFP_KERNEL);
|
||||||
if (!new_elements)
|
if (!new_elements)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
@@ -164,7 +166,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
|
||||||
|
|
||||||
while (pool->curr_nr < pool->min_nr) {
|
while (pool->curr_nr < pool->min_nr) {
|
||||||
spin_unlock_irqrestore(&pool->lock, flags);
|
spin_unlock_irqrestore(&pool->lock, flags);
|
||||||
element = pool->alloc(gfp_mask, pool->pool_data);
|
element = pool->alloc(GFP_KERNEL, pool->pool_data);
|
||||||
if (!element)
|
if (!element)
|
||||||
goto out;
|
goto out;
|
||||||
spin_lock_irqsave(&pool->lock, flags);
|
spin_lock_irqsave(&pool->lock, flags);
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue