staging/lustre/ldlm: ELC picks locks in a safer policy
Change the ELC policy to pick only locks that have no dirty pages, no pages under writeback, and no locked pages.

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Reviewed-on: http://review.whamcloud.com/9175
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-4300
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Bobi Jam <bobijam@gmail.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent: 047d41bd71
Commit: 7d44333467
5 changed files with 39 additions and 29 deletions
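In rough terms, the change makes the cancel-weight callback treat a lock as safe to cancel early only if none of its cached pages are dirty, under writeback, or vmlocked. The following is a standalone, illustrative sketch of that decision, not the kernel code itself: the struct, field, and function names below are made-up stand-ins, and only the checks they encode come from the patch.

/* Illustrative model of the new ELC decision; not kernel code. */
#include <stdbool.h>

enum elc_policy_res { ELC_KEEP_LOCK, ELC_CANCEL_LOCK };

struct model_lock {
        bool has_dirty_page;      /* some covered page is PageDirty()           */
        bool has_writeback_page;  /* some covered page is PageWriteback()       */
        bool has_vmlocked_page;   /* some covered page is cl_page_is_vmlocked() */
};

/* Plays the role of the ns->ns_cancel callback: non-zero means ok to cancel. */
static int cancel_weight(const struct model_lock *lock)
{
        return !(lock->has_dirty_page ||
                 lock->has_writeback_page ||
                 lock->has_vmlocked_page);
}

/* Mirrors the tail of the LRU policies: keep the lock unless the weight
 * callback says it is safe to cancel. */
static enum elc_policy_res elc_decide(const struct model_lock *lock)
{
        if (cancel_weight(lock) == 0)
                return ELC_KEEP_LOCK;

        return ELC_CANCEL_LOCK;
}

In the patch itself this predicate is implemented by weigh_cb()/osc_cancel_weight() and wired into the LRU cancel policies through the renamed ns_cancel callback registered via ns_register_cancel(), as the diff below shows.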
@@ -270,7 +270,7 @@ struct ldlm_pool {
         struct completion        pl_kobj_unregister;
 };
 
-typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
+typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock);
 
 /**
  * LVB operations.
@@ -447,8 +447,11 @@ struct ldlm_namespace {
         /** Limit of parallel AST RPC count. */
         unsigned                 ns_max_parallel_ast;
 
-        /** Callback to cancel locks before replaying it during recovery. */
-        ldlm_cancel_for_recovery ns_cancel_for_recovery;
+        /**
+         * Callback to check if a lock is good to be canceled by ELC or
+         * during recovery.
+         */
+        ldlm_cancel_cbt          ns_cancel;
 
         /** LDLM lock stats */
         struct lprocfs_stats    *ns_stats;
@@ -480,9 +483,9 @@ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
 }
 
 static inline void ns_register_cancel(struct ldlm_namespace *ns,
-                                      ldlm_cancel_for_recovery arg)
+                                      ldlm_cancel_cbt arg)
 {
-        ns->ns_cancel_for_recovery = arg;
+        ns->ns_cancel = arg;
 }
 
 struct ldlm_lock;
@@ -1137,7 +1137,6 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
                                                     int count)
 {
         ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
-        ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
 
         /* don't check added & count since we want to process all locks
          * from unused list.
@@ -1147,7 +1146,7 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
         switch (lock->l_resource->lr_type) {
         case LDLM_EXTENT:
         case LDLM_IBITS:
-                if (cb && cb(lock))
+                if (ns->ns_cancel && ns->ns_cancel(lock) != 0)
                         break;
         default:
                 result = LDLM_POLICY_SKIP_LOCK;
@@ -1197,8 +1196,13 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
         /* Stop when SLV is not yet come from server or lv is smaller than
          * it is.
          */
-        return (slv == 0 || lv < slv) ?
-                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+        if (slv == 0 || lv < slv)
+                return LDLM_POLICY_KEEP_LOCK;
+
+        if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
+                return LDLM_POLICY_KEEP_LOCK;
+
+        return LDLM_POLICY_CANCEL_LOCK;
 }
 
 /**
@@ -1236,11 +1240,17 @@ static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
                                                  int unused, int added,
                                                  int count)
 {
-        /* Stop LRU processing if young lock is found and we reach past count */
-        return ((added >= count) &&
-                time_before(cfs_time_current(),
-                            cfs_time_add(lock->l_last_used, ns->ns_max_age))) ?
-                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+        if (added >= count)
+                return LDLM_POLICY_KEEP_LOCK;
+
+        if (time_before(cfs_time_current(),
+                        cfs_time_add(lock->l_last_used, ns->ns_max_age)))
+                return LDLM_POLICY_KEEP_LOCK;
+
+        if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
+                return LDLM_POLICY_KEEP_LOCK;
+
+        return LDLM_POLICY_CANCEL_LOCK;
 }
 
 /**
@@ -2249,7 +2249,7 @@ static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
  * recovery, non zero value will be return if the lock can be canceled,
  * or zero returned for not
  */
-static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
+static int mdc_cancel_weight(struct ldlm_lock *lock)
 {
         if (lock->l_resource->lr_type != LDLM_IBITS)
                 return 0;
@@ -2331,7 +2331,7 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
         sptlrpc_lprocfs_cliobd_attach(obd);
         ptlrpc_lprocfs_register_obd(obd);
 
-        ns_register_cancel(obd->obd_namespace, mdc_cancel_for_recovery);
+        ns_register_cancel(obd->obd_namespace, mdc_cancel_weight);
 
         obd->obd_namespace->ns_lvbo = &inode_lvbo;
 
@@ -635,7 +635,9 @@ static int weigh_cb(const struct lu_env *env, struct cl_io *io,
 {
         struct cl_page *page = ops->ops_cl.cpl_page;
 
-        if (cl_page_is_vmlocked(env, page)) {
+        if (cl_page_is_vmlocked(env, page) ||
+            PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)
+           ) {
                 (*(unsigned long *)cbdata)++;
                 return CLP_GANG_ABORT;
         }
@@ -2292,15 +2292,13 @@ no_match:
         if (*flags & LDLM_FL_TEST_LOCK)
                 return -ENOLCK;
         if (intent) {
-                LIST_HEAD(cancels);
-
                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                            &RQF_LDLM_ENQUEUE_LVB);
                 if (!req)
                         return -ENOMEM;
 
-                rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
-                if (rc) {
+                rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
+                if (rc < 0) {
                         ptlrpc_request_free(req);
                         return rc;
                 }
@@ -3110,17 +3108,14 @@ static int osc_import_event(struct obd_device *obd,
  * \retval zero the lock can't be canceled
  * \retval other ok to cancel
  */
-static int osc_cancel_for_recovery(struct ldlm_lock *lock)
+static int osc_cancel_weight(struct ldlm_lock *lock)
 {
         /*
-         * Cancel all unused extent lock in granted mode LCK_PR or LCK_CR.
-         *
-         * XXX as a future improvement, we can also cancel unused write lock
-         * if it doesn't have dirty data and active mmaps.
+         * Cancel all unused and granted extent lock.
          */
         if (lock->l_resource->lr_type == LDLM_EXTENT &&
-            (lock->l_granted_mode == LCK_PR ||
-             lock->l_granted_mode == LCK_CR) && osc_ldlm_weigh_ast(lock) == 0)
+            lock->l_granted_mode == lock->l_req_mode &&
+            osc_ldlm_weigh_ast(lock) == 0)
                 return 1;
 
         return 0;
@@ -3197,7 +3192,7 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         }
 
         INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
-        ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
+        ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
         return rc;
 
 out_ptlrpcd_work: